#if !defined(__CUSPARSEMATIMPL)
#define __CUSPARSEMATIMPL

#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>

#if CUDA_VERSION >= 4020
#include <cusparse_v2.h>
#else
#include <cusparse.h>
#endif

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/functional.h>

#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseCcsrsv_solve((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h),(i),(cuComplex*)(j),(cuComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseCcsrsv_analysis((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseCcsrmv((a),(b),(c),(d),(e),(cuComplex*)(f),(g),(cuComplex*)(h),(i),(j),(cuComplex*)(k),(cuComplex*)(l),(cuComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseCcsr2csc((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseChybmv((a),(b),(cuComplex*)(c),(d),(e),(cuComplex*)(f),(cuComplex*)(g),(cuComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseCcsr2hyb((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseChyb2csr((a),(b),(c),(cuComplex*)(d),(e),(f))
/* static: this header may be included by more than one translation unit, so
   non-static definitions here would cause multiple-definition link errors */
static cuFloatComplex ALPHA = {1.0f, 0.0f};
static cuFloatComplex BETA  = {0.0f, 0.0f};
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve(a,b,c,d,e,f,g,h,i,j,k)        cusparseZcsrsv_solve((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h),(i),(cuDoubleComplex*)(j),(cuDoubleComplex*)(k))
#define cusparse_analysis(a,b,c,d,e,f,g,h,i)         cusparseZcsrsv_analysis((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(h),(i))
#define cusparse_csr_spmv(a,b,c,d,e,f,g,h,i,j,k,l,m) cusparseZcsrmv((a),(b),(c),(d),(e),(cuDoubleComplex*)(f),(g),(cuDoubleComplex*)(h),(i),(j),(cuDoubleComplex*)(k),(cuDoubleComplex*)(l),(cuDoubleComplex*)(m))
#define cusparse_csr2csc(a,b,c,d,e,f,g,h,i,j,k,l)    cusparseZcsr2csc((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j),(k),(l))
#define cusparse_hyb_spmv(a,b,c,d,e,f,g,h)           cusparseZhybmv((a),(b),(cuDoubleComplex*)(c),(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(cuDoubleComplex*)(h))
#define cusparse_csr2hyb(a,b,c,d,e,f,g,h,i,j)        cusparseZcsr2hyb((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(h),(i),(j))
#define cusparse_hyb2csr(a,b,c,d,e,f)                cusparseZhyb2csr((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
static cuDoubleComplex ALPHA = {1.0, 0.0};
static cuDoubleComplex BETA  = {0.0, 0.0};
#endif
#else
static PetscScalar ALPHA = 1.0;
static PetscScalar BETA  = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_solve    cusparseScsrsv_solve
#define cusparse_analysis cusparseScsrsv_analysis
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr2csc  cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb  cusparseScsr2hyb
#define cusparse_hyb2csr  cusparseShyb2csr
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_solve    cusparseDcsrsv_solve
#define cusparse_analysis cusparseDcsrsv_analysis
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr2csc  cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb  cusparseDcsr2hyb
#define cusparse_hyb2csr  cusparseDhyb2csr
#endif
#endif
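
/* Illustrative usage sketch (assumptions, not part of the build): with the
   dispatch above, callers can form y = alpha*A*x + beta*y without naming a
   precision. The handle, descriptor, raw CSR device arrays, and the x/y
   device vectors are assumed to already exist; the handle is assumed to be
   in host pointer mode so that &ALPHA and &BETA are host addresses:

     cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST);
     stat = cusparse_csr_spmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                              m, n, nnz, &ALPHA, descr,
                              csrVal, csrRowPtr, csrColInd,
                              x, &BETA, y);
*/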

#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY   thrust::device_vector<PetscInt>
#define THRUSTARRAY      thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt         num_rows;
  PetscInt         num_cols;
  PetscInt         num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY      *values;
};
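
/* Illustrative sketch (hRowPtr/hColInd/hVal are hypothetical host CSR arrays;
   not part of the build): constructing the thrust vectors from host iterator
   ranges copies the data to the device, and the cusparse routines then take
   the raw device pointers:

     CsrMatrix *mat      = new CsrMatrix;
     mat->num_rows       = m;
     mat->num_cols       = n;
     mat->num_entries    = nnz;
     mat->row_offsets    = new THRUSTINTARRAY32(hRowPtr, hRowPtr + m + 1);
     mat->column_indices = new THRUSTINTARRAY32(hColInd, hColInd + nnz);
     mat->values         = new THRUSTARRAY(hVal, hVal + nnz);
     int *ro = thrust::raw_pointer_cast(mat->row_offsets->data());
*/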

/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for triangular solve */
  cusparseMatDescr_t          descr;     /* matrix descriptor for this factor */
  cusparseSolveAnalysisInfo_t solveInfo; /* analysis data produced once and reused by each solve */
  cusparseOperation_t         solveOp;   /* whether the solve applies the factor or its transpose */
  CsrMatrix                   *csrMat;   /* the factor itself, in CSR form on the GPU */
};
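
/* Illustrative sketch (hypothetical variable names; not part of the build):
   cusparse_analysis is run once when the factor is assembled, and the
   resulting solveInfo is reused by every subsequent MatSolve:

     cusparse_analysis(handle, fact->solveOp, n, nnz, fact->descr,
                       values, rowOffsets, colIndices, fact->solveInfo);
     ...
     cusparse_solve(handle, fact->solveOp, n, &ALPHA, fact->descr,
                    values, rowOffsets, colIndices, fact->solveInfo,
                    barray, xarray);
*/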

/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void               *mat;          /* opaque pointer to the matrix: either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr;         /* data needed to describe the matrix for a multiply */
  THRUSTINTARRAY     *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar        *alpha;        /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar        *beta;         /* pointer to a device "scalar" storing the beta parameter in the SpMV */
};
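
/* Illustrative sketch (hypothetical mult pointer; not part of the build):
   alpha and beta live in device memory so the SpMV can be launched in device
   pointer mode, avoiding host-side scalar transfers at multiply time:

     PetscScalar one = 1.0, zero = 0.0;
     cudaMalloc((void**)&mult->alpha, sizeof(PetscScalar));
     cudaMalloc((void**)&mult->beta,  sizeof(PetscScalar));
     cudaMemcpy(mult->alpha, &one,  sizeof(PetscScalar), cudaMemcpyHostToDevice);
     cudaMemcpy(mult->beta,  &zero, sizeof(PetscScalar), cudaMemcpyHostToDevice);
     cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_DEVICE);
*/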

/* This is a larger struct holding all the triangular factors for a solve, a transpose solve, and
   any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr;          /* pointer for lower triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr;          /* pointer for upper triangular (factored matrix) on GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for lower triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for upper triangular (factored matrix) on GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY                    *rpermIndices;            /* indices used for any row reordering */
  THRUSTINTARRAY                    *cpermIndices;            /* indices used for any column reordering */
  THRUSTARRAY                       *workVector;              /* work vector shared by the two triangular solves */
  cusparseHandle_t                  handle;                   /* a handle to the cusparse library */
  PetscInt                          nnz;                      /* number of nonzeros ... need this for accurate logging between ICC and ILU */
};
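
/* Illustrative sketch (bGPU is a hypothetical thrust::device_ptr to the
   right-hand side; not part of the build): when a reordering is present, the
   right-hand side is gathered through rpermIndices into workVector before the
   two triangular solves run:

     thrust::copy(thrust::make_permutation_iterator(bGPU, fs->rpermIndices->begin()),
                  thrust::make_permutation_iterator(bGPU, fs->rpermIndices->end()),
                  fs->workVector->begin());
*/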

/* This is a larger struct holding all the matrices for a SpMV, and SpMV Transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat;          /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the matrix on the GPU (for the transpose ... useful for BiCG) */
  THRUSTARRAY                  *workVector;   /* pointer to a work vector to which we can copy the relevant indices of a vector we want to multiply */
  PetscInt                     nonzerorow;    /* number of nonzero rows ... used in the flop calculations */
  MatCUSPARSEStorageFormat     format;        /* the storage format for the matrix on the device */
  cudaStream_t                 stream;        /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t             handle;        /* a handle to the cusparse library ... this may not be owned (if we're working in parallel i.e. multiGPUs) */
};
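
/* Illustrative sketch (hypothetical names; not part of the build): when
   format selects HYB storage, the assembled CSR data is converted once and
   the resulting cusparseHybMat_t is stored behind the opaque mat->mat
   pointer:

     cusparseHybMat_t hybMat;
     cusparseCreateHybMat(&hybMat);
     cusparse_csr2hyb(handle, m, n, descr, values, rowOffsets, colIndices,
                      hybMat, 0, CUSPARSE_HYB_PARTITION_AUTO);
*/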

PETSC_INTERN PetscErrorCode MatCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatCUSPARSESetStream(Mat, const cudaStream_t stream);
PETSC_INTERN PetscErrorCode MatCUSPARSESetHandle(Mat, const cusparseHandle_t handle);
PETSC_INTERN PetscErrorCode MatCUSPARSEClearHandle(Mat);
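
/* Illustrative sketch (hypothetical calling order; not part of the build):
   within PETSc, a multi-GPU caller can hand a sequential block its own
   stream and handle before the data is assembled on the device:

     ierr = MatCUSPARSESetHandle(A, handle);CHKERRQ(ierr);
     ierr = MatCUSPARSESetStream(A, stream);CHKERRQ(ierr);
     ierr = MatCUSPARSECopyToGPU(A);CHKERRQ(ierr);
*/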
#endif /* __CUSPARSEMATIMPL */