19ae82921SPaul Mullowney /* 29ae82921SPaul Mullowney Defines the basic matrix operations for the AIJ (compressed row) 3bc3f50f2SPaul Mullowney matrix storage format using the CUSPARSE library, 49ae82921SPaul Mullowney */ 5dced61a5SBarry Smith #define PETSC_SKIP_SPINLOCK 69ae82921SPaul Mullowney 73d13b8fdSMatthew G. Knepley #include <petscconf.h> 83d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 9087f3262SPaul Mullowney #include <../src/mat/impls/sbaij/seq/sbaij.h> 103d13b8fdSMatthew G. Knepley #include <../src/vec/vec/impls/dvecimpl.h> 11af0996ceSBarry Smith #include <petsc/private/vecimpl.h> 129ae82921SPaul Mullowney #undef VecType 133d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> 14bc3f50f2SPaul Mullowney 15e057df02SPaul Mullowney const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; 169ae82921SPaul Mullowney 17087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 18087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 19087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 20087f3262SPaul Mullowney 216fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 226fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 236fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 24087f3262SPaul Mullowney 256fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); 266fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); 276fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); 
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);

/* Destructors for the GPU-side structures hung off Mat->spptr */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetStream"
/*
   MatCUSPARSESetStream - records the given CUDA stream in the matrix's
   Mat_SeqAIJCUSPARSE structure and attaches it to the cuSPARSE handle, so
   subsequent cuSPARSE calls on this matrix are issued on that stream.

   NOTE(review): assumes A is a SeqAIJCUSPARSE matrix with A->spptr already
   allocated; no check is performed here -- confirm callers guarantee this.
*/
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  cusparsestruct->stream = stream;
  stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUDA(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetHandle"
/*
   MatCUSPARSESetHandle - installs an externally created cuSPARSE handle on
   the matrix, destroying any handle the matrix previously owned, and sets
   device pointer mode on the new handle.

   NOTE(review): the CHKERRCUDA(stat) after the if is NOT guarded by it --
   when cusparsestruct->handle is NULL, stat is read uninitialized.  Consider
   braces around the destroy+check; flagged only, code left as-is.
*/
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (cusparsestruct->handle)
    stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUDA(stat);
  cusparsestruct->handle = handle;
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSEClearHandle"
/*
   MatCUSPARSEClearHandle - forgets (but does not destroy) the cuSPARSE
   handle stored on the matrix; presumably the handle is owned elsewhere
   when this is used -- confirm with callers.
*/
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscFunctionBegin;
  if (cusparsestruct->handle)
    cusparsestruct->handle = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse"
/* Query hook: reports that this factored matrix uses the CUSPARSE solver package. */
PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(0);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
  on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
  algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
  performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
  CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
  algorithms are not recommended. This class does NOT support direct solver operations.

  Level: beginner

.seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

#undef __FUNCT__
#define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse"
/*
   MatGetFactor_seqaijcusparse_cusparse - creates an empty SEQAIJCUSPARSE
   matrix B of the same (square) size as A and wires up the symbolic-factor
   function pointers appropriate for ftype (LU/ILU/ILUDT or Cholesky/ICC).
   Any other factor type is rejected with PETSC_ERR_SUP.
*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
  PetscErrorCode ierr;
  PetscInt       n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
  (*B)->factortype = ftype;
  ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
  ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
    /* block sizes only propagated on the LU/ILU path in this version */
    ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
    (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJCUSPARSE;
  } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
    (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");

  /* factor matrices manage their own storage; skip the usual aij allocation */
  ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE"
/*
   MatCUSPARSESetFormat_SeqAIJCUSPARSE - type-specific implementation behind
   MatCUSPARSESetFormat(): records the requested storage format in the
   matrix's Mat_SeqAIJCUSPARSE struct.  MULT and ALL currently set the same
   field; other operations are rejected.  Pre-CUDA-4.2 builds only accept CSR.
*/
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
#if CUDA_VERSION>=4020
  switch (op) {
  case MAT_CUSPARSE_MULT:
    cusparsestruct->format = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparsestruct->format = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
  }
#else
  /* ELL and HYB need cusparseHybMat support, introduced in CUDA 4.2 */
  if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later.");
#endif
  PetscFunctionReturn(0);
}

/*@
   MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
   operation. Only the MatMult operation can use different GPU storage formats
   for MPIAIJCUSPARSE matrices.
   Not Collective

   Input Parameters:
+  A - Matrix of type SEQAIJCUSPARSE
.  op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
-  format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)

   Output Parameter:

   Level: intermediate

.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat"
/* Public entry point: dispatches to the type-specific "MatCUSPARSESetFormat_C" method if one is composed (no-op otherwise, per PetscTryMethod). */
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID,1);
  ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE"
/*
   MatSetFromOptions_SeqAIJCUSPARSE - processes the command-line options
   -mat_cusparse_mult_storage_format (SpMV format, non-factored matrices
   only) and -mat_cusparse_storage_format (SpMV and TriSolve format).

   NOTE(review): PetscOptionsHead() is paired here with
   PetscObjectOptionsBegin()/PetscOptionsEnd() rather than PetscOptionsTail();
   confirm this pairing against the PETSc options API -- it looks suspicious
   but is left byte-identical here.
*/
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  ierr = PetscObjectOptionsBegin((PetscObject)A);
  if (A->factortype==MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {
      ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);
    }
  }
  ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                          "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);

}

#undef __FUNCT__
#define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE"
/* Delegates symbolic ILU to the CPU SeqAIJ code, then points numeric factorization at the CUSPARSE version. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE"
/* Delegates symbolic LU to the CPU SeqAIJ code, then points numeric factorization at the CUSPARSE version. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE"
/* Delegates symbolic ICC to the CPU SeqAIJ code, then points numeric factorization at the CUSPARSE Cholesky version. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE"
/* Delegates symbolic Cholesky to the CPU SeqAIJ code, then points numeric factorization at the CUSPARSE version. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix"
/*
   MatSeqAIJCUSPARSEBuildILULowerTriMatrix - extracts the unit lower
   triangular factor L from the CPU SeqAIJ factored storage (a->i/a->j/a->a),
   uploads it as a CSR matrix on the GPU, and runs the cuSPARSE triangular
   solve analysis on it.  Only runs when the GPU copy is stale
   (PETSC_CUDA_UNALLOCATED or PETSC_CUDA_CPU).

   Layout notes grounded in the fill loop below:
   - row 0 of L is just the implicit unit diagonal entry;
   - each subsequent row i copies the nz strictly-lower entries of row i and
     then appends the unit diagonal, so nzLower = n + ai[n] - ai[1];
   - staging buffers are pinned host memory (cudaMallocHost) and freed after
     the thrust arrays have taken a copy.
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscScalar                       *AALo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];

      /* Allocate Space for the lower triangular matrix */
      ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(ierr);

      /* Fill the lower triangular matrix; row 0 holds only the unit diagonal */
      AiLo[0]  = (PetscInt) 0;
      AiLo[n]  = nzLower;
      AjLo[0]  = (PetscInt) 0;
      AALo[0]  = (MatScalar) 1.0;
      v        = aa;
      vi       = aj;
      offset   = 1;
      rowOffset= 1;
      for (i=1; i<n; i++) {
        nz = ai[i+1] - ai[i];
        /* additional 1 for the term on the diagonal */
        AiLo[i]    = rowOffset;
        rowOffset += nz+1;

        /* copy the strictly-lower entries of row i ... */
        ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);

        /* ... then append the unit diagonal entry */
        offset      += nz;
        AjLo[offset] = (PetscInt) i;
        AALo[offset] = (MatScalar) 1.0;
        offset      += 1;

        v  += nz;
        vi += nz;
      }

      /* allocate space for the triangular factor information */
      loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: 0-based, triangular, lower, unit diagonal */
      stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation */
      loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix: thrust device arrays take copies of the pinned staging buffers */
      loTriFactor->csrMat = new CsrMatrix;
      loTriFactor->csrMat->num_rows = n;
      loTriFactor->csrMat->num_cols = n;
      loTriFactor->csrMat->num_entries = nzLower;

      loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

      loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
      loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

      loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
      loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                               loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                               loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                               loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary? (loTriFactor was loaded from this same field above; NOTE(review): redundant unless the field started NULL -- confirm) */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

      /* release the pinned staging buffers */
      ierr = cudaFreeHost(AiLo);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AjLo);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix"
/*
   MatSeqAIJCUSPARSEBuildILUUpperTriMatrix - extracts the upper triangular
   factor U from the CPU SeqAIJ factored storage and uploads it as a CSR
   matrix on the GPU, then runs the cuSPARSE solve analysis.  Mirrors the
   lower-triangular builder above, but walks rows in reverse using a->diag
   (adiag), which in factored SeqAIJ storage locates each row's diagonal;
   the stored diagonal is inverted (AAUp = 1/v[nz]) because the descriptor
   uses CUSPARSE_DIAG_TYPE_NON_UNIT.
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix. */
      nzUpper = adiag[0]-adiag[n];

      /* Allocate Space for the upper triangular matrix (pinned host staging) */
      ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr);

      /* Fill the upper triangular matrix, walking rows last-to-first and the
         output arrays back-to-front */
      AiUp[0]=(PetscInt) 0;
      AiUp[n]=nzUpper;
      offset = nzUpper;
      for (i=n-1; i>=0; i--) {
        v  = aa + adiag[i+1] + 1;
        vi = aj + adiag[i+1] + 1;

        /* number of elements NOT on the diagonal */
        nz = adiag[i] - adiag[i+1]-1;

        /* decrement the offset */
        offset -= (nz+1);

        /* first, set the diagonal elements (stored inverted for the non-unit solve) */
        AjUp[offset] = (PetscInt) i;
        AAUp[offset] = (MatScalar)1./v[nz];
        AiUp[i]      = AiUp[i+1] - (nz+1);

        ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);
      }

      /* allocate space for the triangular factor information */
      upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: 0-based, triangular, upper, non-unit diagonal */
      stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation */
      upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix */
      upTriFactor->csrMat = new CsrMatrix;
      upTriFactor->csrMat->num_rows = n;
      upTriFactor->csrMat->num_cols = n;
      upTriFactor->csrMat->num_entries = nzUpper;

      upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

      upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
      upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

      upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
      upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                               upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                               upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                               upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary? (see matching note in the lower-triangular builder) */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

      /* release the pinned staging buffers */
      ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU"
/*
   MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU - builds both GPU triangular
   factors, allocates the device work vector used by the triangular solves,
   marks the GPU copy valid, and uploads the row/column permutation index
   arrays -- but only when the orderings are not the identity (identity
   permutations are applied implicitly by skipping the gather/scatter).
*/
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  const PetscInt               *r,*c;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  /* scratch vector of length n shared by the two triangular solves */
  cusparseTriFactors->workVector = new THRUSTARRAY;
  cusparseTriFactors->workVector->resize(n);
  cusparseTriFactors->nnz=a->nz;

  A->valid_GPU_matrix = PETSC_CUDA_BOTH;
  /*lower triangular indices */
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity) {
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
  }
  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);

  /*upper triangular indices */
  ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity) {
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
  }
  ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices"
/*
   MatSeqAIJCUSPARSEBuildICCTriMatrices - builds the GPU triangular factors
   for ICC/Cholesky.  NOTE(review): A->data is cast both as Mat_SeqAIJ and
   as Mat_SeqSBAIJ below -- presumably the factored ICC matrix stores SBAIJ
   layout; confirm before modifying.  (Function body continues below.)
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  PetscErrorCode                    ierr;
PetscInt *AiUp, *AjUp; 515087f3262SPaul Mullowney PetscScalar *AAUp; 516087f3262SPaul Mullowney PetscScalar *AALo; 517087f3262SPaul Mullowney PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; 518087f3262SPaul Mullowney Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; 519087f3262SPaul Mullowney const PetscInt *ai = b->i,*aj = b->j,*vj; 520087f3262SPaul Mullowney const MatScalar *aa = b->a,*v; 521087f3262SPaul Mullowney 522087f3262SPaul Mullowney PetscFunctionBegin; 523c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 524087f3262SPaul Mullowney try { 525087f3262SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 526c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); 527c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); 528c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); 529c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); 530087f3262SPaul Mullowney 531087f3262SPaul Mullowney /* Fill the upper triangular matrix */ 532087f3262SPaul Mullowney AiUp[0]=(PetscInt) 0; 533087f3262SPaul Mullowney AiUp[n]=nzUpper; 534087f3262SPaul Mullowney offset = 0; 535087f3262SPaul Mullowney for (i=0; i<n; i++) { 536087f3262SPaul Mullowney /* set the pointers */ 537087f3262SPaul Mullowney v = aa + ai[i]; 538087f3262SPaul Mullowney vj = aj + ai[i]; 539087f3262SPaul Mullowney nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ 540087f3262SPaul Mullowney 541087f3262SPaul Mullowney /* first, set the diagonal elements */ 542087f3262SPaul Mullowney AjUp[offset] = (PetscInt) i; 543*09f51544SAlejandro Lamas Daviña AAUp[offset] = (MatScalar)1.0/v[nz]; 544087f3262SPaul Mullowney AiUp[i] = offset; 545*09f51544SAlejandro Lamas Daviña AALo[offset] = 
(MatScalar)1.0/v[nz]; 546087f3262SPaul Mullowney 547087f3262SPaul Mullowney offset+=1; 548087f3262SPaul Mullowney if (nz>0) { 549087f3262SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); 550087f3262SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 551087f3262SPaul Mullowney for (j=offset; j<offset+nz; j++) { 552087f3262SPaul Mullowney AAUp[j] = -AAUp[j]; 553087f3262SPaul Mullowney AALo[j] = AAUp[j]/v[nz]; 554087f3262SPaul Mullowney } 555087f3262SPaul Mullowney offset+=nz; 556087f3262SPaul Mullowney } 557087f3262SPaul Mullowney } 558087f3262SPaul Mullowney 559aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 560aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 561087f3262SPaul Mullowney 562aa372e3fSPaul Mullowney /* Create the matrix description */ 563c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); 564c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 565c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 566c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); 567c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); 568087f3262SPaul Mullowney 569aa372e3fSPaul Mullowney /* Create the solve analysis information */ 570c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); 571aa372e3fSPaul Mullowney 572aa372e3fSPaul Mullowney /* set the operation */ 573aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 574aa372e3fSPaul Mullowney 575aa372e3fSPaul Mullowney /* set the matrix */ 576aa372e3fSPaul 
Mullowney upTriFactor->csrMat = new CsrMatrix; 577aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = A->rmap->n; 578aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols = A->cmap->n; 579aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = a->nz; 580aa372e3fSPaul Mullowney 581aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 582aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 583aa372e3fSPaul Mullowney 584aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 585aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 586aa372e3fSPaul Mullowney 587aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 588aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); 589aa372e3fSPaul Mullowney 590aa372e3fSPaul Mullowney /* perform the solve analysis */ 591aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 592aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 593aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 594c41cb2e2SAlejandro Lamas Daviña upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); 595aa372e3fSPaul Mullowney 596aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 597aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 598aa372e3fSPaul Mullowney 599aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 600aa372e3fSPaul Mullowney loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 601aa372e3fSPaul Mullowney 602aa372e3fSPaul Mullowney /* Create the matrix description */ 603c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); 604c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 605c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 606c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); 607c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); 608aa372e3fSPaul Mullowney 609aa372e3fSPaul Mullowney /* Create the solve analysis information */ 610c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); 611aa372e3fSPaul Mullowney 612aa372e3fSPaul Mullowney /* set the operation */ 613aa372e3fSPaul Mullowney loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; 614aa372e3fSPaul Mullowney 615aa372e3fSPaul Mullowney /* set the matrix */ 616aa372e3fSPaul Mullowney loTriFactor->csrMat = new CsrMatrix; 617aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows = A->rmap->n; 618aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols = A->cmap->n; 619aa372e3fSPaul Mullowney loTriFactor->csrMat->num_entries = a->nz; 620aa372e3fSPaul Mullowney 621aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 622aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 623aa372e3fSPaul Mullowney 624aa372e3fSPaul 
Mullowney loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 625aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 626aa372e3fSPaul Mullowney 627aa372e3fSPaul Mullowney loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 628aa372e3fSPaul Mullowney loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); 629aa372e3fSPaul Mullowney 630aa372e3fSPaul Mullowney /* perform the solve analysis */ 631aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, 632aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, 633aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), 634c41cb2e2SAlejandro Lamas Daviña loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); 635aa372e3fSPaul Mullowney 636aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 637aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; 638087f3262SPaul Mullowney 639c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 640c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr); 641c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr); 642c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr); 643c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr); 644087f3262SPaul Mullowney } catch(char *ex) { 645087f3262SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 646087f3262SPaul Mullowney } 647087f3262SPaul Mullowney } 648087f3262SPaul Mullowney PetscFunctionReturn(0); 649087f3262SPaul Mullowney } 650087f3262SPaul Mullowney 651087f3262SPaul Mullowney #undef __FUNCT__ 652087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" 653087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) 6549ae82921SPaul Mullowney { 6559ae82921SPaul Mullowney PetscErrorCode ierr; 656087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 657087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 658087f3262SPaul Mullowney IS ip = a->row; 659087f3262SPaul Mullowney const PetscInt *rip; 660087f3262SPaul Mullowney PetscBool perm_identity; 661087f3262SPaul Mullowney PetscInt n = A->rmap->n; 662087f3262SPaul Mullowney 663087f3262SPaul Mullowney PetscFunctionBegin; 664087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); 665aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 666aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 667aa372e3fSPaul Mullowney cusparseTriFactors->nnz=(a->nz-n)*2 + n; 668aa372e3fSPaul Mullowney 669087f3262SPaul Mullowney /*lower triangular indices */ 670087f3262SPaul Mullowney 
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); 671087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 672087f3262SPaul Mullowney if (!perm_identity) { 673aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 674aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(rip, rip+n); 675aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 676aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(rip, rip+n); 677087f3262SPaul Mullowney } 678087f3262SPaul Mullowney ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); 679087f3262SPaul Mullowney PetscFunctionReturn(0); 680087f3262SPaul Mullowney } 681087f3262SPaul Mullowney 682087f3262SPaul Mullowney #undef __FUNCT__ 6839ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" 6846fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 6859ae82921SPaul Mullowney { 6869ae82921SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 6879ae82921SPaul Mullowney IS isrow = b->row,iscol = b->col; 6889ae82921SPaul Mullowney PetscBool row_identity,col_identity; 689b175d8bbSPaul Mullowney PetscErrorCode ierr; 6909ae82921SPaul Mullowney 6919ae82921SPaul Mullowney PetscFunctionBegin; 6929ae82921SPaul Mullowney ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 693e057df02SPaul Mullowney /* determine which version of MatSolve needs to be used. 
*/ 6949ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 6959ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 696bda325fcSPaul Mullowney if (row_identity && col_identity) { 697bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 698bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 699bda325fcSPaul Mullowney } else { 700bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 701bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 702bda325fcSPaul Mullowney } 7038dc1d2a3SPaul Mullowney 704e057df02SPaul Mullowney /* get the triangular factors */ 705087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 7069ae82921SPaul Mullowney PetscFunctionReturn(0); 7079ae82921SPaul Mullowney } 7089ae82921SPaul Mullowney 709087f3262SPaul Mullowney #undef __FUNCT__ 710087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" 711087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 712087f3262SPaul Mullowney { 713087f3262SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 714087f3262SPaul Mullowney IS ip = b->row; 715087f3262SPaul Mullowney PetscBool perm_identity; 716b175d8bbSPaul Mullowney PetscErrorCode ierr; 717087f3262SPaul Mullowney 718087f3262SPaul Mullowney PetscFunctionBegin; 719087f3262SPaul Mullowney ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 720087f3262SPaul Mullowney 721087f3262SPaul Mullowney /* determine which version of MatSolve needs to be used. 
*/
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (perm_identity) {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve          = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }

  /* upload the triangular factors and run the solve analysis */
  ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve"
/*
   Builds the transposes (CSC form) of both triangular factors on the GPU and
   runs the cusparse solve analysis on them; used by the transpose solves.
   Called lazily the first time MatSolveTranspose is invoked.
*/
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  cusparseStatus_t                  stat;
  cusparseIndexBase_t               indexBase;
  cusparseMatrixType_t              matrixType;
  cusparseFillMode_t                fillMode;
  cusparseDiagType_t                diagType;

  PetscFunctionBegin;

  /*********************************************/
  /* Now the Transpose of the Lower Tri Factor */
  /*********************************************/

  loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct;

  /* mirror the lower factor's descriptor, flipping the fill mode for the transpose */
  matrixType = cusparseGetMatType(loTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(loTriFactor->descr);
  fillMode   = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
    CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType   = cusparseGetMatDiagType(loTriFactor->descr);

  stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUDA(stat);
  stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUDA(stat);
  stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUDA(stat);
  stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUDA(stat);

  stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* the transpose is materialized, so the solve op is non-transpose */
  loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* device storage for the CSC (transpose) of the lower factor */
  loTriFactorT->csrMat = new CsrMatrix;
  loTriFactorT->csrMat->num_rows       = loTriFactor->csrMat->num_rows;
  loTriFactorT->csrMat->num_cols       = loTriFactor->csrMat->num_cols;
  loTriFactorT->csrMat->num_entries    = loTriFactor->csrMat->num_entries;
  loTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1);
  loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries);
  loTriFactorT->csrMat->values         = new THRUSTARRAY(loTriFactor->csrMat->num_entries);

  /* CSR -> CSC conversion computes the transpose */
  stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                          loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                          loTriFactor->csrMat->values->data().get(),
                          loTriFactor->csrMat->row_offsets->data().get(),
                          loTriFactor->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->values->data().get(),
                          loTriFactorT->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

  /* run the solve analysis on the transposed factor */
  stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
                           loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries,
                           loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
                           loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(),
                           loTriFactorT->solveInfo);CHKERRCUDA(stat);

  ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;

  /*********************************************/
  /* Now the Transpose of the Upper Tri Factor */
  /*********************************************/

  upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct;

  /* mirror the upper factor's descriptor, flipping the fill mode for the transpose */
  matrixType = cusparseGetMatType(upTriFactor->descr);
  indexBase  = cusparseGetMatIndexBase(upTriFactor->descr);
  fillMode   = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
    CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType   = cusparseGetMatDiagType(upTriFactor->descr);

  stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUDA(stat);
  stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUDA(stat);
  stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUDA(stat);
  stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUDA(stat);

  stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUDA(stat);

  upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* device storage for the CSC (transpose) of the upper factor */
  upTriFactorT->csrMat = new CsrMatrix;
  upTriFactorT->csrMat->num_rows       = upTriFactor->csrMat->num_rows;
  upTriFactorT->csrMat->num_cols       = upTriFactor->csrMat->num_cols;
  upTriFactorT->csrMat->num_entries    = upTriFactor->csrMat->num_entries;
  upTriFactorT->csrMat->row_offsets    = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1);
  upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries);
  upTriFactorT->csrMat->values         = new THRUSTARRAY(upTriFactor->csrMat->num_entries);

  /* CSR -> CSC conversion computes the transpose */
  stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
                          upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
                          upTriFactor->csrMat->values->data().get(),
                          upTriFactor->csrMat->row_offsets->data().get(),
                          upTriFactor->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->values->data().get(),
                          upTriFactorT->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

  /* run the solve analysis on the transposed factor */
  stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
                           upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries,
                           upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
                           upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(),
                           upTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* assign the pointer. Is this really necessary?
*/
  ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult"
/*
   Builds the GPU transpose of A (in the storage format selected for A) so
   that MatMultTranspose can run as a non-transposed SpMV.  Built lazily and
   cached in cusparsestruct->matTranspose.
*/
static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct      = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  Mat_SeqAIJCUSPARSEMultStruct *matstructT     = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  Mat_SeqAIJ                   *a              = (Mat_SeqAIJ*)A->data;
  cusparseStatus_t             stat;
  cusparseIndexBase_t          indexBase;
  cudaError_t                  err;

  PetscFunctionBegin;

  /* container for the transposed matrix; descriptor copies A's index base */
  matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
  stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUDA(stat);
  indexBase = cusparseGetMatIndexBase(matstruct->descr);
  stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat);

  /* device-resident alpha/beta so cusparse can read them in device pointer mode */
  err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUDA(err);
  err = cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
  err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUDA(err);
  err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat);

  if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
    CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
    CsrMatrix *matrixT= new CsrMatrix;
    matrixT->num_rows       = A->rmap->n;
    matrixT->num_cols       = A->cmap->n;
    matrixT->num_entries    = a->nz;
    matrixT->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
    matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
    matrixT->values         = new THRUSTARRAY(a->nz);

    /* the CSC of A is the CSR of A^T */
    indexBase = cusparseGetMatIndexBase(matstruct->descr);
    stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows,
                            matrix->num_cols, matrix->num_entries,
                            matrix->values->data().get(),
                            matrix->row_offsets->data().get(),
                            matrix->column_indices->data().get(),
                            matrixT->values->data().get(),
                            matrixT->column_indices->data().get(),
                            matrixT->row_offsets->data().get(),
                            CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

    matstructT->mat = matrixT;

  } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if CUDA_VERSION>=5000
    /* Step 1: convert the HYB matrix back to CSR */
    CsrMatrix *temp= new CsrMatrix;
    temp->num_rows       = A->rmap->n;
    temp->num_cols       = A->cmap->n;
    temp->num_entries    = a->nz;
    temp->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
    temp->column_indices = new THRUSTINTARRAY32(a->nz);
    temp->values         = new THRUSTARRAY(a->nz);

    stat = cusparse_hyb2csr(cusparsestruct->handle,
                            matstruct->descr, (cusparseHybMat_t)matstruct->mat,
                            temp->values->data().get(),
                            temp->row_offsets->data().get(),
                            temp->column_indices->data().get());CHKERRCUDA(stat);

    /* Step 2: CSR -> CSC, i.e. the matrix transpose */
    CsrMatrix *tempT= new CsrMatrix;
    tempT->num_rows       = A->rmap->n;
    tempT->num_cols       = A->cmap->n;
    tempT->num_entries    = a->nz;
    tempT->row_offsets    = new THRUSTINTARRAY32(A->rmap->n+1);
    tempT->column_indices = new THRUSTINTARRAY32(a->nz);
    tempT->values         = new THRUSTARRAY(a->nz);

    stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
                            temp->num_cols, temp->num_entries,
                            temp->values->data().get(),
                            temp->row_offsets->data().get(),
                            temp->column_indices->data().get(),
                            tempT->values->data().get(),
                            tempT->column_indices->data().get(),
                            tempT->row_offsets->data().get(),
                            CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

    /* Step 3: convert the transposed CSR back to ELL/HYB */
    cusparseHybMat_t hybMat;
    stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat);
    cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
      CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
    stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
                            matstructT->descr, tempT->values->data().get(),
                            tempT->row_offsets->data().get(),
                            tempT->column_indices->data().get(),
                            hybMat, 0, partition);CHKERRCUDA(stat);

    matstructT->mat = hybMat;

    /* drop the CSR temporaries */
    if (tempT) {
      if (tempT->values) delete (THRUSTARRAY*) tempT->values;
      if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
      if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
      delete (CsrMatrix*) tempT;
    }
    if (temp) {
      if (temp->values) delete (THRUSTARRAY*) temp->values;
      if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
      if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
      delete (CsrMatrix*) temp;
    }
#else
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later.");
#endif
  }
  /* assign the compressed row indices */
  matstructT->cprowIndices = new THRUSTINTARRAY;

  /* cache the transpose on the matrix */
  ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE"
/*
   Transpose triangular solve with permutations: x = P^T (L U)^{-T} P b,
   using the cached transposed factors (built on first use).
*/
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  PetscInt                          n = xx->map->n;
  PetscScalar                       *xarray, *barray;
  thrust::device_ptr<PetscScalar>   xGPU,bGPU;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT       = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU            = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ...
on the fly */ 1014aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1015bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1016aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1017aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1018bda325fcSPaul Mullowney } 1019bda325fcSPaul Mullowney 1020bda325fcSPaul Mullowney /* Get the GPU pointers */ 1021c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1022c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1023c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1024c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 1025bda325fcSPaul Mullowney 1026aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1027c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1028c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), 1029c41cb2e2SAlejandro Lamas Daviña xGPU); 1030aa372e3fSPaul Mullowney 1031aa372e3fSPaul Mullowney /* First, solve U */ 1032aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1033aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1034aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1035aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1036aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1037aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1038c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 1039aa372e3fSPaul Mullowney 1040aa372e3fSPaul Mullowney /* Then, solve L */ 
1041aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1042aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1043aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1044aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1045aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1046aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1047c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1048aa372e3fSPaul Mullowney 1049aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1050c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1051c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), 1052aa372e3fSPaul Mullowney tempGPU->begin()); 1053aa372e3fSPaul Mullowney 1054aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. 
*/ 1055c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 1056bda325fcSPaul Mullowney 1057bda325fcSPaul Mullowney /* restore */ 1058c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1059c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1060c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1061087f3262SPaul Mullowney 1062aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1063bda325fcSPaul Mullowney PetscFunctionReturn(0); 1064bda325fcSPaul Mullowney } 1065bda325fcSPaul Mullowney 1066bda325fcSPaul Mullowney #undef __FUNCT__ 1067bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" 10686fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 1069bda325fcSPaul Mullowney { 1070c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 1071bda325fcSPaul Mullowney cusparseStatus_t stat; 1072bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1073aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1074aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1075aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1076b175d8bbSPaul Mullowney PetscErrorCode ierr; 1077bda325fcSPaul Mullowney 1078bda325fcSPaul Mullowney PetscFunctionBegin; 1079aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... 
on the fly */ 1080aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1081bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1082aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1083aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1084bda325fcSPaul Mullowney } 1085bda325fcSPaul Mullowney 1086bda325fcSPaul Mullowney /* Get the GPU pointers */ 1087c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1088c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1089bda325fcSPaul Mullowney 1090aa372e3fSPaul Mullowney /* First, solve U */ 1091aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1092aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1093aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1094aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1095aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1096aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1097c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1098aa372e3fSPaul Mullowney 1099aa372e3fSPaul Mullowney /* Then, solve L */ 1100aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1101aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1102aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1103aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1104aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1105aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1106c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 
1107bda325fcSPaul Mullowney 1108bda325fcSPaul Mullowney /* restore */ 1109c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1110c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1111c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1112aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1113bda325fcSPaul Mullowney PetscFunctionReturn(0); 1114bda325fcSPaul Mullowney } 1115bda325fcSPaul Mullowney 11169ae82921SPaul Mullowney #undef __FUNCT__ 11179ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" 11186fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 11199ae82921SPaul Mullowney { 1120c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 1121c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> xGPU,bGPU; 11229ae82921SPaul Mullowney cusparseStatus_t stat; 11239ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1124aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1125aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1126aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1127b175d8bbSPaul Mullowney PetscErrorCode ierr; 1128ebc8f436SDominic Meiser VecType t; 1129ebc8f436SDominic Meiser PetscBool flg; 11309ae82921SPaul Mullowney 11319ae82921SPaul Mullowney PetscFunctionBegin; 1132ebc8f436SDominic Meiser ierr = VecGetType(bb,&t);CHKERRQ(ierr); 1133c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 1134c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into 
MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUDA); 1135ebc8f436SDominic Meiser ierr = VecGetType(xx,&t);CHKERRQ(ierr); 1136c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 1137c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). Can only deal with %s\n.",t,VECSEQCUDA); 1138ebc8f436SDominic Meiser 1139e057df02SPaul Mullowney /* Get the GPU pointers */ 1140c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1141c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1142c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1143c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 11449ae82921SPaul Mullowney 1145aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1146c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1147c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), 1148c41cb2e2SAlejandro Lamas Daviña xGPU); 1149aa372e3fSPaul Mullowney 1150aa372e3fSPaul Mullowney /* Next, solve L */ 1151aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1152aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1153aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1154aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1155aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1156aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1157c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 1158aa372e3fSPaul Mullowney 1159aa372e3fSPaul Mullowney /* Then, solve U */ 1160aa372e3fSPaul Mullowney stat = 
cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1161aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1162aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1163aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1164aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1165aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1166c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1167aa372e3fSPaul Mullowney 1168aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1169c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1170c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->end()), 1171aa372e3fSPaul Mullowney tempGPU->begin()); 1172aa372e3fSPaul Mullowney 1173aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. 
*/ 1174c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 11759ae82921SPaul Mullowney 1176c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1177c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1178c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1179aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 11809ae82921SPaul Mullowney PetscFunctionReturn(0); 11819ae82921SPaul Mullowney } 11829ae82921SPaul Mullowney 11839ae82921SPaul Mullowney #undef __FUNCT__ 11849ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" 11856fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 11869ae82921SPaul Mullowney { 1187c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 11889ae82921SPaul Mullowney cusparseStatus_t stat; 11899ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1190aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1191aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1192aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1193b175d8bbSPaul Mullowney PetscErrorCode ierr; 11949ae82921SPaul Mullowney 11959ae82921SPaul Mullowney PetscFunctionBegin; 1196e057df02SPaul Mullowney /* Get the GPU pointers */ 1197c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1198c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 11999ae82921SPaul Mullowney 1200aa372e3fSPaul Mullowney /* First, solve L */ 1201aa372e3fSPaul Mullowney stat = 
cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1202aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1203aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1204aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1205aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1206aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1207c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1208aa372e3fSPaul Mullowney 1209aa372e3fSPaul Mullowney /* Next, solve U */ 1210aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1211aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1212aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1213aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1214aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1215aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1216c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 12179ae82921SPaul Mullowney 1218c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1219c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1220c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1221aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 12229ae82921SPaul Mullowney PetscFunctionReturn(0); 12239ae82921SPaul Mullowney } 12249ae82921SPaul Mullowney 12259ae82921SPaul Mullowney #undef __FUNCT__ 1226e057df02SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" 12276fa9248bSJed Brown static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) 12289ae82921SPaul Mullowney { 12299ae82921SPaul Mullowney 1230aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = 
(Mat_SeqAIJCUSPARSE*)A->spptr; 1231aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 12329ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 12339ae82921SPaul Mullowney PetscInt m = A->rmap->n,*ii,*ridx; 12349ae82921SPaul Mullowney PetscErrorCode ierr; 1235aa372e3fSPaul Mullowney cusparseStatus_t stat; 1236b06137fdSPaul Mullowney cudaError_t err; 12379ae82921SPaul Mullowney 12389ae82921SPaul Mullowney PetscFunctionBegin; 1239c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 12409ae82921SPaul Mullowney ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 1241ce814652SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); 12429ae82921SPaul Mullowney try { 1243aa372e3fSPaul Mullowney cusparsestruct->nonzerorow=0; 1244aa372e3fSPaul Mullowney for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); 12459ae82921SPaul Mullowney 12469ae82921SPaul Mullowney if (a->compressedrow.use) { 12479ae82921SPaul Mullowney m = a->compressedrow.nrows; 12489ae82921SPaul Mullowney ii = a->compressedrow.i; 12499ae82921SPaul Mullowney ridx = a->compressedrow.rindex; 12509ae82921SPaul Mullowney } else { 1251b06137fdSPaul Mullowney /* Forcing compressed row on the GPU */ 12529ae82921SPaul Mullowney int k=0; 1253854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); 1254854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); 12559ae82921SPaul Mullowney ii[0]=0; 12569ae82921SPaul Mullowney for (int j = 0; j<m; j++) { 12579ae82921SPaul Mullowney if ((a->i[j+1]-a->i[j])>0) { 12589ae82921SPaul Mullowney ii[k] = a->i[j]; 12599ae82921SPaul Mullowney ridx[k]= j; 12609ae82921SPaul Mullowney k++; 12619ae82921SPaul Mullowney } 12629ae82921SPaul Mullowney } 1263aa372e3fSPaul Mullowney 
ii[cusparsestruct->nonzerorow] = a->nz; 1264aa372e3fSPaul Mullowney m = cusparsestruct->nonzerorow; 12659ae82921SPaul Mullowney } 12669ae82921SPaul Mullowney 1267aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 1268aa372e3fSPaul Mullowney matstruct = new Mat_SeqAIJCUSPARSEMultStruct; 1269c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUDA(stat); 1270c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 1271c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); 12729ae82921SPaul Mullowney 1273c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUDA(err); 1274c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1275c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUDA(err); 1276c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1277c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); 1278b06137fdSPaul Mullowney 1279aa372e3fSPaul Mullowney /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ 1280aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1281aa372e3fSPaul Mullowney /* set the matrix */ 1282aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1283a65300a6SPaul Mullowney matrix->num_rows = m; 1284aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1285aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1286a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1287a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 
12889ae82921SPaul Mullowney 1289aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1290aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1291aa372e3fSPaul Mullowney 1292aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1293aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1294aa372e3fSPaul Mullowney 1295aa372e3fSPaul Mullowney /* assign the pointer */ 1296aa372e3fSPaul Mullowney matstruct->mat = matrix; 1297aa372e3fSPaul Mullowney 1298aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 12992692e278SPaul Mullowney #if CUDA_VERSION>=4020 1300aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1301a65300a6SPaul Mullowney matrix->num_rows = m; 1302aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1303aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1304a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1305a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 1306aa372e3fSPaul Mullowney 1307aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1308aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1309aa372e3fSPaul Mullowney 1310aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1311aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1312aa372e3fSPaul Mullowney 1313aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 1314c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); 1315aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
1316aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 1317a65300a6SPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, 1318aa372e3fSPaul Mullowney matstruct->descr, matrix->values->data().get(), 1319aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 1320aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 1321c41cb2e2SAlejandro Lamas Daviña hybMat, 0, partition);CHKERRCUDA(stat); 1322aa372e3fSPaul Mullowney /* assign the pointer */ 1323aa372e3fSPaul Mullowney matstruct->mat = hybMat; 1324aa372e3fSPaul Mullowney 1325aa372e3fSPaul Mullowney if (matrix) { 1326aa372e3fSPaul Mullowney if (matrix->values) delete (THRUSTARRAY*)matrix->values; 1327aa372e3fSPaul Mullowney if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; 1328aa372e3fSPaul Mullowney if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; 1329aa372e3fSPaul Mullowney delete (CsrMatrix*)matrix; 1330087f3262SPaul Mullowney } 13312692e278SPaul Mullowney #endif 1332087f3262SPaul Mullowney } 1333ca45077fSPaul Mullowney 1334aa372e3fSPaul Mullowney /* assign the compressed row indices */ 1335aa372e3fSPaul Mullowney matstruct->cprowIndices = new THRUSTINTARRAY(m); 1336aa372e3fSPaul Mullowney matstruct->cprowIndices->assign(ridx,ridx+m); 1337aa372e3fSPaul Mullowney 1338aa372e3fSPaul Mullowney /* assign the pointer */ 1339aa372e3fSPaul Mullowney cusparsestruct->mat = matstruct; 1340aa372e3fSPaul Mullowney 13419ae82921SPaul Mullowney if (!a->compressedrow.use) { 13429ae82921SPaul Mullowney ierr = PetscFree(ii);CHKERRQ(ierr); 13439ae82921SPaul Mullowney ierr = PetscFree(ridx);CHKERRQ(ierr); 13449ae82921SPaul Mullowney } 1345aa372e3fSPaul Mullowney cusparsestruct->workVector = new THRUSTARRAY; 1346aa372e3fSPaul Mullowney cusparsestruct->workVector->resize(m); 13479ae82921SPaul Mullowney } catch(char *ex) { 13489ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE 
error: %s", ex); 13499ae82921SPaul Mullowney } 1350c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 13512205254eSKarl Rupp 1352c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 13532205254eSKarl Rupp 13549ae82921SPaul Mullowney ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 13559ae82921SPaul Mullowney } 13569ae82921SPaul Mullowney PetscFunctionReturn(0); 13579ae82921SPaul Mullowney } 13589ae82921SPaul Mullowney 13599ae82921SPaul Mullowney #undef __FUNCT__ 13602a7a6963SBarry Smith #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" 13612a7a6963SBarry Smith static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) 13629ae82921SPaul Mullowney { 13639ae82921SPaul Mullowney PetscErrorCode ierr; 136433d57670SJed Brown PetscInt rbs,cbs; 13659ae82921SPaul Mullowney 13669ae82921SPaul Mullowney PetscFunctionBegin; 136733d57670SJed Brown ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); 13689ae82921SPaul Mullowney if (right) { 1369ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); 13709ae82921SPaul Mullowney ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 137133d57670SJed Brown ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); 1372c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*right,VECSEQCUDA);CHKERRQ(ierr); 13739ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); 13749ae82921SPaul Mullowney } 13759ae82921SPaul Mullowney if (left) { 1376ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); 13779ae82921SPaul Mullowney ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 137833d57670SJed Brown ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); 1379c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*left,VECSEQCUDA);CHKERRQ(ierr); 13809ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); 
13819ae82921SPaul Mullowney } 13829ae82921SPaul Mullowney PetscFunctionReturn(0); 13839ae82921SPaul Mullowney } 13849ae82921SPaul Mullowney 1385c41cb2e2SAlejandro Lamas Daviña struct VecCUDAPlusEquals 1386aa372e3fSPaul Mullowney { 1387aa372e3fSPaul Mullowney template <typename Tuple> 1388aa372e3fSPaul Mullowney __host__ __device__ 1389aa372e3fSPaul Mullowney void operator()(Tuple t) 1390aa372e3fSPaul Mullowney { 1391aa372e3fSPaul Mullowney thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); 1392aa372e3fSPaul Mullowney } 1393aa372e3fSPaul Mullowney }; 1394aa372e3fSPaul Mullowney 13959ae82921SPaul Mullowney #undef __FUNCT__ 13969ae82921SPaul Mullowney #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" 13976fa9248bSJed Brown static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 13989ae82921SPaul Mullowney { 13999ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1400aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1401aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1402c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*yarray; 1403b175d8bbSPaul Mullowney PetscErrorCode ierr; 1404aa372e3fSPaul Mullowney cusparseStatus_t stat; 14059ae82921SPaul Mullowney 14069ae82921SPaul Mullowney PetscFunctionBegin; 1407e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1408e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1409c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1410c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1411aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1412aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1413aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 
1414aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, mat->num_entries, 1415b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), 1416c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstruct->beta, 1417c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 1418aa372e3fSPaul Mullowney } else { 14192692e278SPaul Mullowney #if CUDA_VERSION>=4020 1420aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1421aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1422b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1423c41cb2e2SAlejandro Lamas Daviña xarray, matstruct->beta, 1424c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 14252692e278SPaul Mullowney #endif 14269ae82921SPaul Mullowney } 1427c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1428c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1429aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 1430c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1431ca45077fSPaul Mullowney } 1432aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 14339ae82921SPaul Mullowney PetscFunctionReturn(0); 14349ae82921SPaul Mullowney } 14359ae82921SPaul Mullowney 14369ae82921SPaul Mullowney #undef __FUNCT__ 1437ca45077fSPaul Mullowney #define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE" 14386fa9248bSJed Brown static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 1439ca45077fSPaul Mullowney { 1440ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1441aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1442aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = 
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1443c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*yarray; 1444b175d8bbSPaul Mullowney PetscErrorCode ierr; 1445aa372e3fSPaul Mullowney cusparseStatus_t stat; 1446ca45077fSPaul Mullowney 1447ca45077fSPaul Mullowney PetscFunctionBegin; 1448e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1449e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1450aa372e3fSPaul Mullowney if (!matstructT) { 1451bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1452aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1453bda325fcSPaul Mullowney } 1454c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1455c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1456aa372e3fSPaul Mullowney 1457aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1458aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1459aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1460aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, 1461b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1462aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1463c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstructT->beta, 1464c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 1465aa372e3fSPaul Mullowney } else { 14662692e278SPaul Mullowney #if CUDA_VERSION>=4020 1467aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1468aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1469b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, 
hybMat, 1470c41cb2e2SAlejandro Lamas Daviña xarray, matstructT->beta, 1471c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 14722692e278SPaul Mullowney #endif 1473ca45077fSPaul Mullowney } 1474c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1475c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1476aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 1477c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1478ca45077fSPaul Mullowney } 1479aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 1480ca45077fSPaul Mullowney PetscFunctionReturn(0); 1481ca45077fSPaul Mullowney } 1482ca45077fSPaul Mullowney 1483aa372e3fSPaul Mullowney 1484ca45077fSPaul Mullowney #undef __FUNCT__ 14859ae82921SPaul Mullowney #define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE" 14866fa9248bSJed Brown static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) 14879ae82921SPaul Mullowney { 14889ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1489aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1490aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1491c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> zptr; 1492c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*zarray; 1493b175d8bbSPaul Mullowney PetscErrorCode ierr; 1494aa372e3fSPaul Mullowney cusparseStatus_t stat; 14956e111a19SKarl Rupp 14969ae82921SPaul Mullowney PetscFunctionBegin; 1497e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1498e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 14999ae82921SPaul Mullowney try { 1500c41cb2e2SAlejandro Lamas Daviña ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); 1501c41cb2e2SAlejandro Lamas Daviña ierr = 
VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1502c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 1503c41cb2e2SAlejandro Lamas Daviña zptr = thrust::device_pointer_cast(zarray); 15049ae82921SPaul Mullowney 1505e057df02SPaul Mullowney /* multiply add */ 1506aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1507aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1508b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1509b06137fdSPaul Mullowney number of compressed rows in the matrix ... which is equivalent to the 1510b06137fdSPaul Mullowney size of the workVector */ 1511aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1512a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1513b06137fdSPaul Mullowney mat->num_entries, matstruct->alpha, matstruct->descr, 1514aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1515c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstruct->beta, 1516c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1517aa372e3fSPaul Mullowney } else { 15182692e278SPaul Mullowney #if CUDA_VERSION>=4020 1519aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1520a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1521aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1522b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1523c41cb2e2SAlejandro Lamas Daviña xarray, matstruct->beta, 1524c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1525a65300a6SPaul Mullowney } 15262692e278SPaul Mullowney #endif 1527aa372e3fSPaul Mullowney } 1528aa372e3fSPaul Mullowney 1529aa372e3fSPaul Mullowney /* scatter the data from the 
temporary into the full vector with a += operation */ 1530c41cb2e2SAlejandro Lamas Daviña thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), 1531c41cb2e2SAlejandro Lamas Daviña thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1532c41cb2e2SAlejandro Lamas Daviña VecCUDAPlusEquals()); 1533c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1534c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 15359ae82921SPaul Mullowney 15369ae82921SPaul Mullowney } catch(char *ex) { 15379ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 15389ae82921SPaul Mullowney } 1539c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 15409ae82921SPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 15419ae82921SPaul Mullowney PetscFunctionReturn(0); 15429ae82921SPaul Mullowney } 15439ae82921SPaul Mullowney 15449ae82921SPaul Mullowney #undef __FUNCT__ 1545b175d8bbSPaul Mullowney #define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE" 15466fa9248bSJed Brown static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) 1547ca45077fSPaul Mullowney { 1548ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1549aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1550aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1551c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> zptr; 1552c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*zarray; 1553b175d8bbSPaul Mullowney PetscErrorCode ierr; 1554aa372e3fSPaul Mullowney cusparseStatus_t 
stat; 15556e111a19SKarl Rupp 1556ca45077fSPaul Mullowney PetscFunctionBegin; 1557e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1558e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1559aa372e3fSPaul Mullowney if (!matstructT) { 1560bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1561aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1562bda325fcSPaul Mullowney } 1563aa372e3fSPaul Mullowney 1564ca45077fSPaul Mullowney try { 1565c41cb2e2SAlejandro Lamas Daviña ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); 1566c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1567c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 1568c41cb2e2SAlejandro Lamas Daviña zptr = thrust::device_pointer_cast(zarray); 1569ca45077fSPaul Mullowney 1570e057df02SPaul Mullowney /* multiply add with matrix transpose */ 1571aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1572aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1573b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1574b06137fdSPaul Mullowney number of compressed rows in the matrix ... 
which is equivalent to the 1575b06137fdSPaul Mullowney size of the workVector */ 1576aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1577a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1578b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1579aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1580c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstructT->beta, 1581c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1582aa372e3fSPaul Mullowney } else { 15832692e278SPaul Mullowney #if CUDA_VERSION>=4020 1584aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1585a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1586aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1587b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, hybMat, 1588c41cb2e2SAlejandro Lamas Daviña xarray, matstructT->beta, 1589c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1590a65300a6SPaul Mullowney } 15912692e278SPaul Mullowney #endif 1592aa372e3fSPaul Mullowney } 1593aa372e3fSPaul Mullowney 1594aa372e3fSPaul Mullowney /* scatter the data from the temporary into the full vector with a += operation */ 1595c41cb2e2SAlejandro Lamas Daviña thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))), 1596c41cb2e2SAlejandro Lamas Daviña thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1597c41cb2e2SAlejandro Lamas Daviña VecCUDAPlusEquals()); 1598ca45077fSPaul Mullowney 1599c41cb2e2SAlejandro Lamas 
Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1600c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 1601ca45077fSPaul Mullowney 1602ca45077fSPaul Mullowney } catch(char *ex) { 1603ca45077fSPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 1604ca45077fSPaul Mullowney } 1605c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1606ca45077fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 1607ca45077fSPaul Mullowney PetscFunctionReturn(0); 1608ca45077fSPaul Mullowney } 1609ca45077fSPaul Mullowney 1610ca45077fSPaul Mullowney #undef __FUNCT__ 16119ae82921SPaul Mullowney #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" 16126fa9248bSJed Brown static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) 16139ae82921SPaul Mullowney { 16149ae82921SPaul Mullowney PetscErrorCode ierr; 16156e111a19SKarl Rupp 16169ae82921SPaul Mullowney PetscFunctionBegin; 16179ae82921SPaul Mullowney ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); 1618bc3f50f2SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 1619e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); 1620bc3f50f2SPaul Mullowney } 16219ae82921SPaul Mullowney if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); 1622bbf3fe20SPaul Mullowney A->ops->mult = MatMult_SeqAIJCUSPARSE; 1623bbf3fe20SPaul Mullowney A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; 1624bbf3fe20SPaul Mullowney A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; 1625bbf3fe20SPaul Mullowney A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; 16269ae82921SPaul Mullowney PetscFunctionReturn(0); 16279ae82921SPaul Mullowney } 16289ae82921SPaul Mullowney 16299ae82921SPaul Mullowney /* --------------------------------------------------------------------------------*/ 16309ae82921SPaul Mullowney #undef __FUNCT__ 16319ae82921SPaul Mullowney #define __FUNCT__ "MatCreateSeqAIJCUSPARSE" 
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz). By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE"
/* Release the GPU-side data (either the mult structures or the triangular
   factors, depending on the factor type), then fall through to the CPU
   AIJ destroy. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype==MAT_FACTOR_NONE) {
    if (A->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
      ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
    }
  } else {
    ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCUSPARSE"
/* Type constructor: builds the CPU AIJ matrix, then attaches either a
   Mat_SeqAIJCUSPARSE (regular matrices) or a Mat_SeqAIJCUSPARSETriFactors
   (factored matrices) to B->spptr, creates the cusparse handle, and wires
   up the CUSPARSE method table. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle=0;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  if (B->factortype==MAT_FACTOR_NONE) {
    /* you cannot check the inode.use flag here since the matrix was just created.
       now build a GPU matrix data structure */
    Mat_SeqAIJCUSPARSE *spptr = new Mat_SeqAIJCUSPARSE;
    B->spptr            = spptr;
    spptr->mat          = 0;
    spptr->matTranspose = 0;
    spptr->workVector   = 0;
    spptr->format       = MAT_CUSPARSE_CSR;
    spptr->stream       = 0;
    spptr->handle       = 0;
    stat = cusparseCreate(&handle);CHKERRCUDA(stat);
    spptr->handle = handle;
    spptr->stream = 0;
  } else {
    /* NEXT, set the pointers to the triangular factors */
    Mat_SeqAIJCUSPARSETriFactors *spptr = new Mat_SeqAIJCUSPARSETriFactors;
    B->spptr                       = spptr;
    spptr->loTriFactorPtr          = 0;
    spptr->upTriFactorPtr          = 0;
    spptr->loTriFactorPtrTranspose = 0;
    spptr->upTriFactorPtrTranspose = 0;
    spptr->rpermIndices            = 0;
    spptr->cpermIndices            = 0;
    spptr->workVector              = 0;
    spptr->handle                  = 0;
    stat = cusparseCreate(&handle);CHKERRCUDA(stat);
    spptr->handle = handle;
    spptr->nnz    = 0;
  }

  B->ops->assemblyend      = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy          = MatDestroy_SeqAIJCUSPARSE;
  B->ops->getvecs          = MatCreateVecs_SeqAIJCUSPARSE;
  B->ops->setfromoptions   = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->mult             = MatMult_SeqAIJCUSPARSE;
  B->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
  B->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
  B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;

  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  B->valid_GPU_matrix = PETSC_CUDA_UNALLOCATED;

  ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*M
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).

  Level: beginner

.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*);


#undef __FUNCT__
#define __FUNCT__ "MatSolverPackageRegister_CUSPARSE"
/* Register the CUSPARSE factorization backends (LU/Cholesky/ILU/ICC) for
   the seqaijcusparse matrix type. */
PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}


#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy"
/* Free a Mat_SeqAIJCUSPARSE: the mult structures (in whatever storage
   format they were built), the work vector, and the cusparse handle.
   Safe to call with *cusparsestruct == NULL; zeroes the pointer on exit. */
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  cusparseStatus_t stat;
  cusparseHandle_t handle;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*cusparsestruct) {
    /* propagate errors from the sub-destroys instead of silently dropping them */
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    handle = (*cusparsestruct)->handle;  /* explicit assignment, not assignment-in-condition */
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUDA(stat);
    }
    delete *cusparsestruct;
    *cusparsestruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "CsrMatrix_Destroy"
/* Free the three thrust device vectors of a CsrMatrix and the struct
   itself; safe with *mat == NULL, zeroes the pointer on exit. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy"
/* Free one triangular-factor structure: its cusparse matrix descriptor,
   solve-analysis info, and CSR storage. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUDA(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUDA(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    delete *trifactor;
    *trifactor = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy"
/* Free one mult structure.  The ->mat member is a cusparseHybMat_t for
   ELL/HYB storage and a CsrMatrix* for CSR, so the format argument selects
   how it is released; also frees the descriptor, the compressed-row index
   vector, and the device-resident alpha/beta scalars. */
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  cusparseStatus_t stat;
  cudaError_t      err;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUDA(stat);
      } else {
        mat = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr);  /* was previously ignoring the return code */
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUDA(stat); }
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha) { err=cudaFree((*matstruct)->alpha);CHKERRCUDA(err); }
    if ((*matstruct)->beta) { err=cudaFree((*matstruct)->beta);CHKERRCUDA(err); }
    delete *matstruct;
    *matstruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactors_Destroy"
/* Free the full set of triangular factors (lower/upper, plus their
   transposes), the permutation index vectors, the work vector, and the
   cusparse handle. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  cusparseHandle_t handle;
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    /* propagate errors from the sub-destroys instead of silently dropping them */
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    handle = (*trifactors)->handle;  /* explicit assignment, not assignment-in-condition */
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUDA(stat);
    }
    delete *trifactors;
    *trifactors = 0;
  }
  PetscFunctionReturn(0);
}
