#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>

#define CHKERRCUSPARSE(stat) \
do { \
   if (PetscUnlikely(stat)) { \
      const char *name  = cusparseGetErrorName(stat); \
      const char *descr = cusparseGetErrorString(stat); \
      SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_LIB,"cuSPARSE error %d (%s) : %s",(int)stat,name,descr); \
   } \
} while(0)
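/* Usage sketch (illustrative only): CHKERRCUSPARSE converts a cuSPARSE status into a
   PETSc library error, so any call returning a cusparseStatus_t can be checked in the
   usual PETSc style, e.g. when creating a library handle inside a PetscErrorCode function:

     cusparseStatus_t stat;
     cusparseHandle_t handle;
     stat = cusparseCreate(&handle);CHKERRCUSPARSE(stat);
*/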

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
const cuFloatComplex PETSC_CUSPARSE_ONE  = {1.0f, 0.0f};
const cuFloatComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
const cuDoubleComplex PETSC_CUSPARSE_ONE  = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE  = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
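
/* Usage sketch (illustrative only): the macros above dispatch to the cuSPARSE routine
   matching the PetscScalar configuration, so one call site serves every precision.
   For example (handle, m, n, nnz, descr, val, rowptr, colind, x, y are hypothetical
   local variables, with x, y and the CSR arrays resident on the device):

     stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                              m, n, nnz, &PETSC_CUSPARSE_ONE, descr,
                              val, rowptr, colind, x, &PETSC_CUSPARSE_ZERO, y);CHKERRCUSPARSE(stat);

   expands to cusparseDcsrmv(...) in a real double-precision build and to
   cusparseZcsrmv(...) (with the cuDoubleComplex casts) in a complex build. */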

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
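/* Population sketch (illustrative only): the thrust device_vector members own the device
   copies of the CSR arrays, and raw device pointers for cuSPARSE calls are obtained with
   data().get().  Assuming hypothetical host-side CSR data int *ia, *ja and PetscScalar *a
   for an m x n matrix with nnz entries:

     CsrMatrix *csr      = new CsrMatrix;
     csr->num_rows       = m;
     csr->num_cols       = n;
     csr->num_entries    = nnz;
     csr->row_offsets    = new THRUSTINTARRAY32(m+1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values         = new THRUSTARRAY(nnz);
     csr->row_offsets->assign(ia, ia+m+1);
     csr->column_indices->assign(ja, ja+nnz);
     csr->values->assign(a, a+nnz);
     int *d_rowptr = csr->row_offsets->data().get();
*/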

//#define CUSPMATRIXCSR32 cusp::csr_matrix<int,PetscScalar,cusp::device_memory>

/* This struct holds the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;
  cusparseSolveAnalysisInfo_t solveInfo;
  cusparseOperation_t         solveOp;
  CsrMatrix                   *csrMat;
};
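/* Setup sketch (illustrative only): a factor is described and analysed once, after which
   cusparse_solve can be applied repeatedly.  For a unit lower triangular factor already
   stored in fs->csrMat (fs, handle, n, nnz are hypothetical names owned by the caller):

     stat = cusparseCreateMatDescr(&fs->descr);CHKERRCUSPARSE(stat);
     stat = cusparseSetMatType(fs->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSPARSE(stat);
     stat = cusparseSetMatFillMode(fs->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSPARSE(stat);
     stat = cusparseSetMatDiagType(fs->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSPARSE(stat);
     stat = cusparseCreateSolveAnalysisInfo(&fs->solveInfo);CHKERRCUSPARSE(stat);
     fs->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;
     stat = cusparse_analysis(handle, fs->solveOp, n, nnz, fs->descr,
                              fs->csrMat->values->data().get(),
                              fs->csrMat->row_offsets->data().get(),
                              fs->csrMat->column_indices->data().get(),
                              fs->solveInfo);CHKERRCUSPARSE(stat);
*/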

/* This struct holds the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;  /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr; /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices;   /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha; /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta_zero; /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar        *beta_one; /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
};
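/* Usage sketch (illustrative only): alpha, beta_zero and beta_one are device-resident
   scalars so that, with the handle in CUSPARSE_POINTER_MODE_DEVICE (set via
   cusparseSetPointerMode), an SpMV can be queued without reading scalars from host
   memory.  For the CSR storage format, y = A*x looks roughly like (matstruct, handle,
   xarray, yarray are hypothetical; xarray/yarray are device arrays of the vectors):

     CsrMatrix *csr = (CsrMatrix*)matstruct->mat;
     stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                              csr->num_rows, csr->num_cols, csr->num_entries,
                              matstruct->alpha, matstruct->descr,
                              csr->values->data().get(),
                              csr->row_offsets->data().get(),
                              csr->column_indices->data().get(),
                              xarray, matstruct->beta_zero, yarray);CHKERRCUSPARSE(stat);
*/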

/* This is a larger struct holding all the triangular factors for a solve, transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr; /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr; /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;  /* indices used for any reordering */
  THRUSTINTARRAY                    *cpermIndices;  /* indices used for any reordering */
  THRUSTARRAY                       *workVector;
  cusparseHandle_t                  handle;   /* a handle to the cusparse library */
  PetscInt                          nnz;      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
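/* Solve sketch (illustrative only): MatSolve with these factors is the usual two-stage
   substitution.  The right-hand side is gathered through rpermIndices into workVector,
   the lower and then the upper factor are solved, and the result is scattered back
   through cpermIndices.  One stage looks like (lo, handle, n, barray, tarray are
   hypothetical names owned by the caller):

     stat = cusparse_solve(handle, lo->solveOp, n, &PETSC_CUSPARSE_ONE, lo->descr,
                           lo->csrMat->values->data().get(),
                           lo->csrMat->row_offsets->data().get(),
                           lo->csrMat->column_indices->data().get(),
                           lo->solveInfo, barray, tarray);CHKERRCUSPARSE(stat);

   The *TriFactorPtrTranspose members play the same roles in a transpose solve (useful for BiCG). */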

/* This is a larger struct holding all the matrices for an SpMV and an SpMV transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat; /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector; /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  PetscInt                     nonzerorow; /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;   /* the storage format for the matrix on the device */
  cudaStream_t                 stream;   /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;   /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e. with multiple GPUs) */
  PetscObjectState             nonzerostate;
};

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
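/* Usage sketch (illustrative only): these internal helpers are meant for code that
   manages several SEQAIJCUSPARSE matrices, e.g. a parallel (MPI) AIJ-CUSPARSE layer
   that wants its sequential blocks to share one library handle and stream.  Assuming
   hypothetical caller-owned matrices a->A and a->B plus a handle and a stream:

     ierr = MatCUSPARSESetHandle(a->A, handle);CHKERRQ(ierr);
     ierr = MatCUSPARSESetHandle(a->B, handle);CHKERRQ(ierr);
     ierr = MatCUSPARSESetStream(a->A, stream);CHKERRQ(ierr);
     ierr = MatCUSPARSESetStream(a->B, stream);CHKERRQ(ierr);
     ierr = MatCUSPARSECopyToGPU(a->A);CHKERRQ(ierr);
*/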
#endif