#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>

#if (CUSPARSE_VER_MAJOR > 10 || CUSPARSE_VER_MAJOR == 10 && CUSPARSE_VER_MINOR >= 2) /* According to cuda/10.1.168 on OLCF Summit */
#define CHKERRCUSPARSE(stat) \
do { \
   if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
   } \
} while(0)
#else
#define CHKERRCUSPARSE(stat) do {if (PetscUnlikely(stat)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusparse error %d",(int)stat);} while(0)
#endif
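/* Usage sketch (not part of this header): CHKERRCUSPARSE is intended to follow a
   cuSPARSE call in the same way CHKERRQ follows a PETSc call.  The local names
   below ("stat", "cusparsestruct") are hypothetical and only for illustration:

     cusparseStatus_t stat;
     stat = cusparseCreate(&cusparsestruct->handle);CHKERRCUSPARSE(stat);
     stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSPARSE(stat);
*/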
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
const cuFloatComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuFloatComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k) cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i) cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l) cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h) cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j) cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f) cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
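/* Population sketch (not part of this header): a CsrMatrix is assumed to be filled by
   allocating its three thrust device vectors and copying the host CSR arrays of a
   SeqAIJ matrix into them, e.g. (with "a" a hypothetical Mat_SeqAIJ*, "m"/"n" the row
   and column counts, and assuming PetscInt is the default 32-bit int):

     CsrMatrix *matrix      = new CsrMatrix;
     matrix->num_rows       = m;
     matrix->num_cols       = n;
     matrix->num_entries    = a->nz;
     matrix->row_offsets    = new THRUSTINTARRAY32(m+1);
     matrix->row_offsets->assign(a->i,a->i+m+1);
     matrix->column_indices = new THRUSTINTARRAY32(a->nz);
     matrix->column_indices->assign(a->j,a->j+a->nz);
     matrix->values         = new THRUSTARRAY(a->nz);
     matrix->values->assign(a->a,a->a+a->nz);
*/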
//#define CUSPMATRIXCSR32 cusp::csr_matrix<int,PetscScalar,cusp::device_memory>

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for the triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};

/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to a matrix; this could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;        /* pointer to a device "scalar" storing the alpha parameter of the SpMV */
  PetscScalar        *beta_zero;    /* pointer to a device "scalar" storing the beta parameter of the SpMV as zero */
  PetscScalar        *beta_one;     /* pointer to a device "scalar" storing the beta parameter of the SpMV as one */
};

/* This is a larger struct holding all the triangular factors for a solve, a transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for the lower triangular (factored matrix) on the GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for the upper triangular (factored matrix) on the GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for the lower triangular (factored matrix) on the GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for the upper triangular (factored matrix) on the GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};

/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* row offsets on the GPU in non-compressed-row format; used to convert CSR to CSC */
  PetscInt                     nonzerorow;      /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;          /* the storage format for the matrix on the device */
  cudaStream_t                 stream;          /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;          /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e. multiple GPUs) */
  PetscObjectState             nonzerostate;
};

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
#endif
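/* Allocation sketch (not part of this header): the device "scalars" alpha, beta_zero and
   beta_one in Mat_SeqAIJCUSPARSEMultStruct are assumed to be device allocations filled
   from the host constants PETSC_CUSPARSE_ONE/PETSC_CUSPARSE_ZERO defined above, e.g.
   (with "matstruct" a hypothetical Mat_SeqAIJCUSPARSEMultStruct* and CHKERRCUDA PETSc's
   CUDA error-checking macro):

     cudaError_t err;
     err = cudaMalloc((void**)&matstruct->alpha,sizeof(PetscScalar));CHKERRCUDA(err);
     err = cudaMemcpy(matstruct->alpha,&PETSC_CUSPARSE_ONE,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
*/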