#if !defined(CUSPARSEMATIMPL)
#define CUSPARSEMATIMPL

#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>
#include <petscaijdevice.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>

#define PetscCallThrust(body) \
  do { \
    try { \
      body; \
    } catch (thrust::system_error & e) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Error in Thrust %s", e.what()); } \
  } while (0)
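/* Usage sketch (illustrative only): PetscCallThrust() converts a thrust::system_error thrown by a
   Thrust call into a PETSc error return, e.g.

     thrust::device_vector<PetscScalar> v(n);
     PetscCallThrust(thrust::sequence(v.begin(), v.end())); // fill v with 0, 1, 2, ... on the device
*/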

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i)  cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j)          cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i)   cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j)  cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j)           cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i)  cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j)          cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i)   cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j)  cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j)           cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
#define cusparseXcsrilu02_analysis   cusparseScsrilu02_analysis
#define cusparseXcsrilu02            cusparseScsrilu02
#define cusparseXcsric02_bufferSize  cusparseScsric02_bufferSize
#define cusparseXcsric02_analysis    cusparseScsric02_analysis
#define cusparseXcsric02             cusparseScsric02
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
#define cusparseXcsrilu02_analysis   cusparseDcsrilu02_analysis
#define cusparseXcsrilu02            cusparseDcsrilu02
#define cusparseXcsric02_bufferSize  cusparseDcsric02_bufferSize
#define cusparseXcsric02_analysis    cusparseDcsric02_analysis
#define cusparseXcsric02             cusparseDcsric02
#endif
#endif
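/* Illustrative sketch (not taken from this file): the cusparseXcsrilu02* aliases above allow the ILU(0)
   setup to be written once for every PetscScalar configuration. A typical buffer-size query would look
   like the following, where handle, m, nnz, descr, csrVal, csrRowPtr, csrColIdx, and info are assumed to
   exist and PetscCallCUSPARSE (or whatever cuSPARSE error-checking macro is in use) wraps the call:

     int bufferSize = 0;
     PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(handle, m, nnz, descr, csrVal, csrRowPtr, csrColIdx, info, &bufferSize));
*/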

#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
#define csrsvInfo_t              csrsv2Info_t
#define cusparseCreateCsrsvInfo  cusparseCreateCsrsv2Info
#define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j)          cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k)       cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j)          cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k)       cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
#define cusparseXcsrsv_solve    cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
#define cusparseXcsrsv_solve    cusparseDcsrsv2_solve
#endif
#endif
#else
#define csrsvInfo_t              cusparseSolveAnalysisInfo_t
#define cusparseCreateCsrsvInfo  cusparseCreateSolveAnalysisInfo
#define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(d), (e), (cuComplex *)(f), (g), (h), (i), (cuComplex *)(j), (cuComplex *)(k))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i)    cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k) cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(d), (e), (cuDoubleComplex *)(f), (g), (h), (i), (cuDoubleComplex *)(j), (cuDoubleComplex *)(k))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i)    cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve    cusparseScsrsv_solve
#define cusparseXcsrsv_analysis cusparseScsrsv_analysis
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve    cusparseDcsrsv_solve
#define cusparseXcsrsv_analysis cusparseDcsrsv_analysis
#endif
#endif
#endif
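/* Illustrative sketch (assumes the CUDA >= 9 csrsv2 signatures selected above, and that handle, descr,
   csrVal, csrRowPtr, csrColInd, info, x, y, and a device buffer already exist): a triangular solve with
   these aliases follows the usual buffer-size query, analysis, solve sequence:

     int bufSize = 0;
     cusparseXcsrsv_buffsize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, descr, csrVal, csrRowPtr, csrColInd, info, &bufSize);
     // ... cudaMalloc() a device buffer of bufSize bytes ...
     cusparseXcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, descr, csrVal, csrRowPtr, csrColInd, info, CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer);
     cusparseXcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, &PETSC_CUSPARSE_ONE, descr, csrVal, csrRowPtr, csrColInd, info, x, y, CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer);
*/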

#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype                                                             CUDA_C_32F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype            CUDA_R_32F
#define cusparse_csr_spgeam            cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype            CUDA_R_64F
#define cusparse_csr_spgeam            cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m)                        cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p)               cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l)                            cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h)                                       cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j)                                  cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f)                                              cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s)    cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
  cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l)                            cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h)                                       cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j)                                  cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f)                                              cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
  cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv   cusparseScsrmv
#define cusparse_csr_spmm   cusparseScsrmm
#define cusparse_csr2csc    cusparseScsr2csc
#define cusparse_hyb_spmv   cusparseShybmv
#define cusparse_csr2hyb    cusparseScsr2hyb
#define cusparse_hyb2csr    cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv   cusparseDcsrmv
#define cusparse_csr_spmm   cusparseDcsrmm
#define cusparse_csr2csc    cusparseDcsr2csc
#define cusparse_hyb_spmv   cusparseDhybmv
#define cusparse_csr2hyb    cusparseDcsr2hyb
#define cusparse_hyb2csr    cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif
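/* Illustrative sketch (CUDA >= 11 branch only, where cusparse_csr2csc aliases cusparseCsr2cscEx2; handle,
   m, n, nnz, the CSR arrays, the CSC output arrays, and a device buffer are assumed to exist): with the
   aliases above, building the CSC (i.e. transposed CSR) copy of a matrix is type-generic:

     size_t bufferSize = 0;
     cusparseCsr2cscEx2_bufferSize(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, &bufferSize);
     // ... cudaMalloc() a device buffer of bufferSize bytes, then ...
     cusparse_csr2csc(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, buffer);
*/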

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt          num_rows;
  PetscInt          num_cols;
  PetscInt          num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
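/* Minimal sketch (assumes host CSR arrays ai, aj, aa of an m x n matrix with nz entries): the thrust
   device vectors are allocated with new and filled with assign(), which copies the host data to the GPU:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nz;
     csr->row_offsets    = new THRUSTINTARRAY32(m + 1);
     csr->column_indices = new THRUSTINTARRAY32(nz);
     csr->values         = new THRUSTARRAY(nz);
     csr->row_offsets->assign(ai, ai + m + 1);
     csr->column_indices->assign(aj, aj + nz);
     csr->values->assign(aa, aa + nz);
*/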

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t    descr;
  cusparseOperation_t   solveOp;
  CsrMatrix            *csrMat;
  csrsvInfo_t           solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
  int                   solveBufferSize;
  void                 *solveBuffer;
  size_t                csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void                 *csr2cscBuffer;
  PetscScalar          *AA_h; /* managed host buffer for moving values to the GPU */
};

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                   handle;   /* a handle to the cusparse library */
  PetscInt                           nnz;      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
  PetscScalar                       *a_band_d; /* GPU data for banded CSR LU factorization matrix diag(L)=1 */
  int                               *i_band_d; /* this could be optimized away */
  cudaDeviceProp                     dev_prop;
  PetscBool                          init_dev_prop;

  /* csrilu0/csric0 appeared in cusparse-8.0, but we use it along with cusparseSpSV,
     which first appeared in cusparse-11.5 with cuda-11.3.
  */
  PetscBool factorizeOnDevice; /* Do factorization on device or not */
#if CUSPARSE_VERSION >= 11500
  PetscScalar *csrVal;
  int         *csrRowPtr, *csrColIdx; /* a,i,j of M. Using int since some cusparse APIs only support 32-bit indices */

  /* Mixed mat descriptor types? yes, different cusparse APIs use different types */
  cusparseMatDescr_t   matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t  spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar         *X, *Y; /* data array of dnVec X and Y */

  /* Mixed size types? yes, CUDA-11.7.0 declared cusparseDcsrilu02_bufferSizeExt() that returns size_t but did not implement it! */
  int    factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cusparse needs various buffers for the factorization and for solves with L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void  *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t        ilu0Info_M;
  csric02Info_t         ic0Info_M;
  int                   structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;

  /* In MatSolveTranspose() for ILU0, we use the two flags to do on-demand solve */
  PetscBool createdTransposeSpSVDescr;    /* Have we created SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we updated SpSV analysis with the latest L, U values? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in ILU0/ICC0 numeric factorization */
#endif
};
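/* Illustrative sketch (cusparse >= 11.5 branch; handle, spMatDescr_L, dnVecDescr_X, and dnVecDescr_Y are
   assumed to already describe the lower factor and the work vectors): the SpSV members above follow the
   usual create / buffer-size / analysis / solve sequence, e.g. for L:

     cusparseSpSVDescr_t spsvDescr_L;
     size_t              spsvBufferSize_L = 0;
     cusparseSpSV_createDescr(&spsvDescr_L);
     cusparseSpSV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, spMatDescr_L, dnVecDescr_X, dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, spsvDescr_L, &spsvBufferSize_L);
     // ... cudaMalloc() spsvBuffer_L of spsvBufferSize_L bytes ...
     cusparseSpSV_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, spMatDescr_L, dnVecDescr_X, dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, spsvDescr_L, spsvBuffer_L);
     cusparseSpSV_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, spMatDescr_L, dnVecDescr_X, dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, spsvDescr_L);
*/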

struct Mat_CusparseSpMV {
  PetscBool initialized;    /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t    spmvBufferSize; /* since I'm not sure if spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void     *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)      /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptor for the dense vectors in y=op(A)x */
#endif
};
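/* Illustrative sketch (CUDA >= 11; handle, matDescr, x, y, n, and a cusparseSpMVAlg_t value spmvAlg are
   assumed to exist): the generic SpMV API queries the buffer size once, after which the buffer can be
   cached in a Mat_CusparseSpMV and reused for every multiply:

     cusparseDnVecDescr_t vecX, vecY;
     size_t               bufferSize = 0;
     cusparseCreateDnVec(&vecX, n, (void *)x, cusparse_scalartype);
     cusparseCreateDnVec(&vecY, n, (void *)y, cusparse_scalartype);
     cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, matDescr, vecX, &PETSC_CUSPARSE_ZERO, vecY, cusparse_scalartype, spmvAlg, &bufferSize);
     // ... cudaMalloc() a buffer of bufferSize bytes, then ...
     cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, matDescr, vecX, &PETSC_CUSPARSE_ZERO, vecY, cusparse_scalartype, spmvAlg, buffer);
*/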

/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void              *mat;          /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;        /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY    *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar       *alpha_one;    /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar       *beta_zero;    /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar       *beta_one;     /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr;  /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV     cuSpMV[3]; /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL) {
    for (int i = 0; i < 3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
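/* Illustrative sketch (CUDA >= 11; csr is assumed to be a populated CsrMatrix*): matDescr is the generic
   descriptor consumed by SpMV/SpMM, and it can be built directly on top of the CsrMatrix arrays:

     cusparseSpMatDescr_t matDescr;
     cusparseCreateCsr(&matDescr, csr->num_rows, csr->num_cols, csr->num_entries,
                       csr->row_offsets->data().get(), csr->column_indices->data().get(), csr->values->data().get(),
                       CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype);
*/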

/* This is a larger struct holding all the matrices for an SpMV and a transposed SpMV */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;            /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose;   /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;     /* pointer to a workvector to which we can copy the relevant indices of a vector we want to multiply */
  THRUSTINTARRAY32             *rowoffsets_gpu; /* rowoffsets on GPU in non-compressed-row format. It is used to convert CSR to CSC */
  PetscInt                      nrows;          /* number of rows of the matrix seen by GPU */
  MatCUSPARSEStorageFormat      format;         /* the storage format for the matrix on the device */
  PetscBool                     use_cpu_solve;  /* Use AIJ_Seq (I)LU solve */
  cudaStream_t                  stream;         /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t              handle;         /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
  PetscObjectState              nonzerostate;   /* track nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t               csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void                *csr2cscBuffer;     /* This is used as a C struct and is calloc'ed by PetscNewLog() */
  cusparseCsr2CscAlg_t csr2cscAlg;        /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t    spmvAlg;
  cusparseSpMMAlg_t    spmmAlg;
#endif
  THRUSTINTARRAY            *csr2csc_i;
  PetscSplitCSRDataStructure deviceMat; /* Matrix on device for, eg, assembly */

  /* Stuff for basic COO support */
  THRUSTINTARRAY *cooPerm;   /* permutation array that sorts the input COO entries by row and col */
  THRUSTINTARRAY *cooPerm_a; /* array indicating that the i-th nonzero (after sorting) is the j-th unique nonzero */

  /* Stuff for extended COO support */
  PetscBool   use_extended_coo; /* Use extended COO format */
  PetscCount *jmap_d;           /* perm[disp+jmap[i]..disp+jmap[i+1]) gives indices of entries in v[] associated with the i-th nonzero of the matrix (see the sketch after this struct) */
  PetscCount *perm_d;

  Mat_SeqAIJCUSPARSE() : use_extended_coo(PETSC_FALSE), jmap_d(NULL), perm_d(NULL) { }
};
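/* Minimal sketch of how jmap_d/perm_d are meant to be read (host notation; disp, jmap, perm, and the COO
   value array v are assumed to be as described in the comment above): accumulating the i-th nonzero of
   the matrix from its COO contributions looks like

     PetscScalar sum = 0.0;
     for (PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += v[perm[disp + k]];
     a[i] = sum; // a[] is the CSR value array of the matrix
*/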

PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat, PetscCount, PetscInt[], PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat, const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);

static inline bool isCudaMem(const void *data) {
  cudaError_t                  cerr;
  struct cudaPointerAttributes attr;
  enum cudaMemoryType          mtype;
  cerr = cudaPointerGetAttributes(&attr, data); /* Do not check the error code since, before CUDA 11.0, passing a host pointer returns cudaErrorInvalidValue */
  cudaGetLastError();                           /* Reset the last error */
#if (CUDART_VERSION < 10000)
  mtype = attr.memoryType;
#else
  mtype = attr.type;
#endif
  return (cerr == cudaSuccess && mtype == cudaMemoryTypeDevice);
}
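/* Usage sketch (illustrative; x is an arbitrary pointer): isCudaMem() lets callers pick between a device
   and a host code path at runtime, e.g.

     if (isCudaMem(x)) {
       // x lives on the GPU: use it directly in cusparse/kernel calls
     } else {
       // x is host memory: copy it to the device first (e.g. with cudaMemcpy)
     }
*/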

#endif