#ifndef PETSC_CUSPARSEMATIMPL_H
#define PETSC_CUSPARSEMATIMPL_H

#include <petscpkg_version.h>
#include <../src/vec/vec/impls/seq/cupm/vecseqcupm.hpp> /* for VecSeq_CUPM */
#include <../src/sys/objects/device/impls/cupm/cupmthrustutility.hpp>
#include <petsc/private/petsclegacycupmblas.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseScsrilu02_analysis
#define cusparseXcsrilu02 cusparseScsrilu02
#define cusparseXcsric02_bufferSize cusparseScsric02_bufferSize
#define cusparseXcsric02_analysis cusparseScsric02_analysis
#define cusparseXcsric02 cusparseScsric02
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseDcsrilu02_analysis
#define cusparseXcsrilu02 cusparseDcsrilu02
#define cusparseXcsric02_bufferSize cusparseDcsric02_bufferSize
#define cusparseXcsric02_analysis cusparseDcsric02_analysis
#define cusparseXcsric02 cusparseDcsric02
#endif
#endif
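
/* Illustrative sketch (not part of the build): the cusparseXcsrilu02_* and cusparseXcsric02_* macros
   above let the factorization code be written once in terms of PetscScalar; each macro expands to the
   S/D/C/Z cuSPARSE call matching the configured precision, inserting cuComplex/cuDoubleComplex casts
   where needed. The names handle, m, nnz, descr, csrVal, csrRowPtr, csrColIdx and info below are
   hypothetical placeholders for the caller's data, and the error check assumes PETSc's
   PetscCallCUSPARSE macro:

     int bufferSize = 0;
     PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(handle, m, nnz, descr, csrVal, csrRowPtr, csrColIdx, info, &bufferSize));
*/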
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
#define csrsvInfo_t csrsv2Info_t
#define cusparseCreateCsrsvInfo cusparseCreateCsrsv2Info
#define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
#define cusparseXcsrsv_solve cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
#define cusparseXcsrsv_solve cusparseDcsrsv2_solve
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
#define csrsvInfo_t cusparseSolveAnalysisInfo_t
#define cusparseCreateCsrsvInfo cusparseCreateSolveAnalysisInfo
#define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(e), (f), (cuComplex *)(g), (h), (i), (j), (cuComplex *)(k), (cuComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) \
  cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(e), (f), (cuDoubleComplex *)(g), (h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve cusparseScsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseScsrsv_analysis(a, b, c, d, e, f, g, h, i)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve cusparseDcsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseDcsrsv_analysis(a, b, c, d, e, f, g, h, i)
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
  cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
  cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb cusparseScsr2hyb
#define cusparse_hyb2csr cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb cusparseDcsr2hyb
#define cusparse_hyb2csr cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt num_rows;
  PetscInt num_cols;
  PetscInt num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY *values;
};
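
/* Illustrative sketch (not part of the build): a CsrMatrix owns its index and value arrays through the
   thrust device_vector typedefs above, so populating one typically looks like the hypothetical snippet
   below; m, n, nnz and the host arrays ai, aj, aa are placeholders for an existing CSR matrix.

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(m + 1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values         = new THRUSTARRAY(nnz);
     csr->row_offsets->assign(ai, ai + m + 1);   // host-to-device copies via thrust
     csr->column_indices->assign(aj, aj + nnz);
     csr->values->assign(aa, aa + nnz);
*/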

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t descr;
  cusparseOperation_t solveOp;
  CsrMatrix *csrMat;
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  csrsvInfo_t solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
#endif
  int solveBufferSize;
  void *solveBuffer;
  size_t csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void *csr2cscBuffer;
  PetscScalar *AA_h; /* managed host buffer for moving values to the GPU */
};

/* This is a larger struct holding all the triangular factors for a solve, a transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
#if PETSC_PKG_CUDA_VERSION_LT(11, 4, 0)
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
#endif

  THRUSTINTARRAY *rpermIndices; /* indices used for any reordering */
  THRUSTINTARRAY *cpermIndices; /* indices used for any reordering */
  THRUSTARRAY *workVector;
  cusparseHandle_t handle; /* a handle to the cusparse library */
  PetscInt nnz;            /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  cudaDeviceProp dev_prop;
  PetscBool init_dev_prop;

  PetscBool factorizeOnDevice; /* Do the factorization on the device or not */
#if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0)
  /* csrilu0/csric0 appeared in cusparse-8.0, but we use it along with cusparseSpSV,
     which first appeared in cusparse-11.5 with cuda-11.3.
  */
  PetscScalar *csrVal, *diag;     // the diagonal D in UtDU of Cholesky
  int *csrRowPtr32, *csrColIdx32; // i, j of M. cusparseScsrilu02/ic02() etc. require 32-bit indices

  PetscInt *csrRowPtr, *csrColIdx; // i, j of M on device for CUDA APIs that support 64-bit indices
  PetscScalar *csrVal_h, *diag_h;  // Since LU is done on the host, we prepare a factored matrix in regular CSR format on the host and then copy it to the device
  PetscInt *csrRowPtr_h;           // csrColIdx_h is temporary, so it is not here

  /* Mixed mat descriptor types? Yes, different cusparse APIs use different types */
  cusparseMatDescr_t matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar *X, *Y; /* data arrays of dnVec X and Y */

  /* Mixed size types? Yes, CUDA-11.7.0 declared cusparseDcsrilu02_bufferSizeExt(), which returns size_t, but did not implement it! */
  int factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cusparse needs various buffers for the factorization and the solves with L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t ilu0Info_M;
  csric02Info_t ic0Info_M;
  int structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;

  /* In MatSolveTranspose() for ILU0, we use these two flags to do the solve on demand */
  PetscBool createdTransposeSpSVDescr;    /* Have we created SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we updated the SpSV analysis with the latest L, U values? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in ILU0/ICC0 numeric factorization */
#endif
};
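
/* Illustrative sketch (not part of the build): once the factorization has filled spMatDescr_L/U and the
   spsvDescr_* descriptors above, a lower-triangular solve with cuSPARSE's generic SpSV API follows the
   usual bufferSize/analysis/solve sequence. Here fs is a hypothetical pointer to this struct, the dense
   vector descriptors are the ones it already holds, and PetscCallCUSPARSE is assumed for error checking.

     const PetscScalar one = 1.0;
     PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L));
     // ... allocate fs->spsvBuffer_L of that size (or alias factBuffer_M as described above) ...
     PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L));
     PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L));
*/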

struct Mat_CusparseSpMV {
  PetscBool initialized; /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t spmvBufferSize; /* since I'm not sure if spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptors for the dense vectors in y=op(A)x */
#endif
};

/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void *mat;                    /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;     /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar *alpha_one;       /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar *beta_zero;       /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar *beta_one;        /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr; /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV cuSpMV[3];    /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL)
  {
    for (int i = 0; i < 3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
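
/* Illustrative sketch (not part of the build): with the generic cuSPARSE API (CUDA >= 11), an SpMV using a
   populated Mat_SeqAIJCUSPARSEMultStruct is a one-time bufferSize call plus the multiply. Here mult is a
   hypothetical pointer to such a struct, xDescr/yDescr are dense-vector descriptors for the input/output
   arrays, alg is a cusparseSpMVAlg_t value, and the handle's pointer mode is assumed to be
   CUSPARSE_POINTER_MODE_DEVICE since alpha_one/beta_zero are device "scalars".

     Mat_CusparseSpMV *spmv = &mult->cuSpMV[0]; // slot 0: non-transpose
     if (!spmv->initialized) {
       PetscCallCUSPARSE(cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, xDescr, mult->beta_zero, yDescr, cusparse_scalartype, alg, &spmv->spmvBufferSize));
       PetscCallCUDA(cudaMalloc(&spmv->spmvBuffer, spmv->spmvBufferSize));
       spmv->initialized = PETSC_TRUE;
     }
     PetscCallCUSPARSE(cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, xDescr, mult->beta_zero, yDescr, cusparse_scalartype, alg, spmv->spmvBuffer));
*/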

/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;          /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY *workVector;                    /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32 *rowoffsets_gpu;           /* row offsets on the GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt nrows;                             /* number of rows of the matrix seen by the GPU */
  MatCUSPARSEStorageFormat format;            /* the storage format for the matrix on the device */
  PetscBool use_cpu_solve;                    /* Use AIJ_Seq (I)LU solve */
  cudaStream_t stream;                        /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t handle;                    /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e. multiple GPUs) */
  PetscObjectState nonzerostate;              /* track the nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t csr2cscBufferSize;        /* stuff used to compute the matTranspose above */
  void *csr2cscBuffer;             /* This is used as a C struct and is calloc'ed by PetscNew() */
  cusparseCsr2CscAlg_t csr2cscAlg; /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t spmvAlg;
  cusparseSpMMAlg_t spmmAlg;
#endif
  THRUSTINTARRAY *csr2csc_i;
  THRUSTINTARRAY *coords; /* permutation array used in MatSeqAIJCUSPARSEMergeMats */
};

typedef struct Mat_SeqAIJCUSPARSETriFactors *Mat_SeqAIJCUSPARSETriFactors_p;

PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);

using VecSeq_CUDA = Petsc::vec::cupm::impl::VecSeq_CUPM<Petsc::device::cupm::DeviceType::CUDA>;

static inline bool isCudaMem(const void *data)
{
  using namespace Petsc::device::cupm;
  auto mtype = PETSC_MEMTYPE_HOST;

  PetscFunctionBegin;
  PetscCallAbort(PETSC_COMM_SELF, impl::Interface<DeviceType::CUDA>::PetscCUPMGetMemType(data, &mtype));
  PetscFunctionReturn(PetscMemTypeDevice(mtype));
}

#endif // PETSC_CUSPARSEMATIMPL_H