19ae82921SPaul Mullowney /* 29ae82921SPaul Mullowney Defines the basic matrix operations for the AIJ (compressed row) 3bc3f50f2SPaul Mullowney matrix storage format using the CUSPARSE library, 49ae82921SPaul Mullowney */ 5dced61a5SBarry Smith #define PETSC_SKIP_SPINLOCK 69ae82921SPaul Mullowney 73d13b8fdSMatthew G. Knepley #include <petscconf.h> 83d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 9087f3262SPaul Mullowney #include <../src/mat/impls/sbaij/seq/sbaij.h> 103d13b8fdSMatthew G. Knepley #include <../src/vec/vec/impls/dvecimpl.h> 11af0996ceSBarry Smith #include <petsc/private/vecimpl.h> 129ae82921SPaul Mullowney #undef VecType 133d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> 14bc3f50f2SPaul Mullowney 15e057df02SPaul Mullowney const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; 169ae82921SPaul Mullowney 17087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 18087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 19087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 20087f3262SPaul Mullowney 216fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 226fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 236fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 24087f3262SPaul Mullowney 256fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); 266fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); 276fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); 
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptions *PetscOptionsObject,Mat);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);

static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetStream"
/* Attach a CUDA stream to the matrix's cuSPARSE handle; subsequent cuSPARSE
   calls made through this handle are issued on that stream. */
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  cusparsestruct->stream = stream;
  stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSP(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetHandle"
/* Replace the matrix's cuSPARSE handle with a caller-supplied one.  Any
   previously owned handle is destroyed first.  Pointer mode is set to
   DEVICE to match how this file passes scalar arguments. */
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (cusparsestruct->handle) {
    /* braces added: previously CHKERRCUSP(stat) executed even when no handle
       existed, reading an uninitialized cusparseStatus_t */
    stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSP(stat);
  }
  cusparsestruct->handle = handle;
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSEClearHandle"
/* Drop (without destroying) the matrix's reference to its cuSPARSE handle;
   used when the handle is owned by someone else. */
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (cusparsestruct->handle) cusparsestruct->handle = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse"
/* Query function composed on factored matrices: reports MATSOLVERCUSPARSE. */
PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(0);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
  on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
  algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
  performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
  CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
  algorithms are not recommended. This class does NOT support direct solver operations.

  Level: beginner

.seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

#undef __FUNCT__
#define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse"
/* Create the factor matrix B for A and install the symbolic-factorization
   entry points appropriate to ftype (LU/ILU/ILUDT or Cholesky/ICC). */
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
  PetscErrorCode ierr;
  PetscInt       n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
  (*B)->factortype = ftype;
  ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
  ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
    ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
    (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJCUSPARSE;
  } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
    (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");

  /* the factor will allocate its own storage during the symbolic phase */
  ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE"
/* Type-specific implementation behind MatCUSPARSESetFormat(): record the
   requested storage format for the given operation.  ELL/HYB need CUDA 4.2. */
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
#if CUDA_VERSION>=4020
  switch (op) {
  case MAT_CUSPARSE_MULT:
    cusparsestruct->format = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparsestruct->format = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
  }
#else
  if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later.");
#endif
  PetscFunctionReturn(0);
}

/*@
   MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
   operation. Only the MatMult operation can use different GPU storage formats
   for MPIAIJCUSPARSE matrices.
   Not Collective

   Input Parameters:
+  A - Matrix of type SEQAIJCUSPARSE
.  op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
-  format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2)

   Output Parameter:

   Level: intermediate

.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat"
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID,1);
  /* dispatch to the type-specific implementation if one is composed */
  ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE"
/* Process -mat_cusparse_mult_storage_format (SpMV only, unfactored matrices)
   and -mat_cusparse_storage_format (SpMV and triangular solve). */
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptions *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  /* NOTE(review): return code of PetscObjectOptionsBegin is not CHKERRQ'd in
     the original; preserved as-is */
  ierr = PetscObjectOptionsBegin((PetscObject)A);
  if (A->factortype==MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {
      ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);
    }
  }
  ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                          "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE"
/* CPU symbolic ILU; numeric phase is redirected to the CUSPARSE version. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE"
/* CPU symbolic LU; numeric phase is redirected to the CUSPARSE version. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE"
/* CPU symbolic ICC; numeric phase is redirected to the CUSPARSE version. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE"
/* CPU symbolic Cholesky; numeric phase is redirected to the CUSPARSE version. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix"
/* Extract the unit-diagonal lower triangular factor L from the host AIJ
   factored data, upload it as a 0-based CSR matrix, and run the cuSPARSE
   triangular-solve analysis on it.  Only rebuilds when the GPU copy is
   unallocated or stale (CPU-side newer). */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscScalar                       *AALo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];

      /* Allocate Space for the lower triangular matrix (pinned host memory) */
      ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUSP(ierr);

      /* Fill the lower triangular matrix */
      AiLo[0] = (PetscInt) 0;
      AiLo[n] = nzLower;
      AjLo[0] = (PetscInt) 0;
      AALo[0] = (MatScalar) 1.0;
      v       = aa;
      vi      = aj;
      offset  = 1;
      rowOffset= 1;
      for (i=1; i<n; i++) {
        nz = ai[i+1] - ai[i];
        /* additional 1 for the term on the diagonal */
        AiLo[i]    = rowOffset;
        rowOffset += nz+1;

        ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);

        offset      += nz;
        AjLo[offset] = (PetscInt) i;
        AALo[offset] = (MatScalar) 1.0;
        offset      += 1;

        v  += nz;
        vi += nz;
      }

      /* allocate space for the triangular factor information */
      loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description */
      stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSP(stat);
      stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat);
      stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat);
      stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSP(stat);
      stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSP(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUSP(stat);

      /* set the operation */
      loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix */
      loTriFactor->csrMat = new CsrMatrix;
      loTriFactor->csrMat->num_rows    = n;
      loTriFactor->csrMat->num_cols    = n;
      loTriFactor->csrMat->num_entries = nzLower;

      loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

      loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
      loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

      loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
      loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                               loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                               loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                               loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUSP(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

      ierr = cudaFreeHost(AiLo);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AjLo);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AALo);CHKERRCUSP(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix"
/* Extract the upper triangular factor U (with inverted diagonal entries,
   matching the PETSc AIJ factored storage), upload it as 0-based CSR and run
   the cuSPARSE triangular-solve analysis.  Rows are filled back-to-front
   because adiag[] indexes U rows in reverse. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix. */
      nzUpper = adiag[0]-adiag[n];

      /* Allocate Space for the upper triangular matrix (pinned host memory) */
      ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr);

      /* Fill the upper triangular matrix */
      AiUp[0]=(PetscInt) 0;
      AiUp[n]=nzUpper;
      offset = nzUpper;
      for (i=n-1; i>=0; i--) {
        v  = aa + adiag[i+1] + 1;
        vi = aj + adiag[i+1] + 1;

        /* number of elements NOT on the diagonal */
        nz = adiag[i] - adiag[i+1]-1;

        /* decrement the offset */
        offset -= (nz+1);

        /* first, set the diagonal elements */
        AjUp[offset] = (PetscInt) i;
        AAUp[offset] = 1./v[nz];
        AiUp[i]      = AiUp[i+1] - (nz+1);

        ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);
      }

      /* allocate space for the triangular factor information */
      upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description */
      stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSP(stat);
      stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat);
      stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat);
      stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat);
      stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSP(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUSP(stat);

      /* set the operation */
      upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix */
      upTriFactor->csrMat = new CsrMatrix;
      upTriFactor->csrMat->num_rows    = n;
      upTriFactor->csrMat->num_cols    = n;
      upTriFactor->csrMat->num_entries = nzUpper;

      upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

      upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
      upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

      upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
      upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                               upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                               upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                               upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUSP(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

      ierr = cudaFreeHost(AiUp);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AjUp);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AAUp);CHKERRCUSP(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU"
/* Build both ILU triangular factors on the GPU, allocate the solve work
   vector, and upload the row/column permutations (only when they are not
   identities).  Marks the GPU copy as up to date. */
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  const PetscInt               *r,*c;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  cusparseTriFactors->workVector = new THRUSTARRAY;
  cusparseTriFactors->workVector->resize(n);
  cusparseTriFactors->nnz=a->nz;

  A->valid_GPU_matrix = PETSC_CUSP_BOTH;
  /*lower triangular indices */
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity) {
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
  }
  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);

  /*upper triangular indices */
  ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity) {
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
  }
  ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices"
/* Build the ICC triangular factors on the GPU.  NOTE(review): this routine is
   truncated in the visible chunk; only its head is reproduced here and the
   remainder continues below unchanged. */
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  PetscErrorCode                    ierr;
  PetscInt                          *AiUp, *AjUp;
515087f3262SPaul Mullowney PetscScalar *AAUp; 516087f3262SPaul Mullowney PetscScalar *AALo; 517087f3262SPaul Mullowney PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; 518087f3262SPaul Mullowney Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; 519087f3262SPaul Mullowney const PetscInt *ai = b->i,*aj = b->j,*vj; 520087f3262SPaul Mullowney const MatScalar *aa = b->a,*v; 521087f3262SPaul Mullowney 522087f3262SPaul Mullowney PetscFunctionBegin; 523087f3262SPaul Mullowney if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) { 524087f3262SPaul Mullowney try { 525087f3262SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 526087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr); 527087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUSP(ierr); 528087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr); 529087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr); 530087f3262SPaul Mullowney 531087f3262SPaul Mullowney /* Fill the upper triangular matrix */ 532087f3262SPaul Mullowney AiUp[0]=(PetscInt) 0; 533087f3262SPaul Mullowney AiUp[n]=nzUpper; 534087f3262SPaul Mullowney offset = 0; 535087f3262SPaul Mullowney for (i=0; i<n; i++) { 536087f3262SPaul Mullowney /* set the pointers */ 537087f3262SPaul Mullowney v = aa + ai[i]; 538087f3262SPaul Mullowney vj = aj + ai[i]; 539087f3262SPaul Mullowney nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ 540087f3262SPaul Mullowney 541087f3262SPaul Mullowney /* first, set the diagonal elements */ 542087f3262SPaul Mullowney AjUp[offset] = (PetscInt) i; 543087f3262SPaul Mullowney AAUp[offset] = 1.0/v[nz]; 544087f3262SPaul Mullowney AiUp[i] = offset; 545087f3262SPaul Mullowney AALo[offset] = 1.0/v[nz]; 546087f3262SPaul Mullowney 547087f3262SPaul Mullowney offset+=1; 548087f3262SPaul Mullowney if 
(nz>0) { 549087f3262SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); 550087f3262SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 551087f3262SPaul Mullowney for (j=offset; j<offset+nz; j++) { 552087f3262SPaul Mullowney AAUp[j] = -AAUp[j]; 553087f3262SPaul Mullowney AALo[j] = AAUp[j]/v[nz]; 554087f3262SPaul Mullowney } 555087f3262SPaul Mullowney offset+=nz; 556087f3262SPaul Mullowney } 557087f3262SPaul Mullowney } 558087f3262SPaul Mullowney 559aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 560aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 561087f3262SPaul Mullowney 562aa372e3fSPaul Mullowney /* Create the matrix description */ 563aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSP(stat); 564aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 565aa372e3fSPaul Mullowney stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat); 566aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat); 567aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSP(stat); 568087f3262SPaul Mullowney 569aa372e3fSPaul Mullowney /* Create the solve analysis information */ 570aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUSP(stat); 571aa372e3fSPaul Mullowney 572aa372e3fSPaul Mullowney /* set the operation */ 573aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 574aa372e3fSPaul Mullowney 575aa372e3fSPaul Mullowney /* set the matrix */ 576aa372e3fSPaul Mullowney upTriFactor->csrMat = new CsrMatrix; 577aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = A->rmap->n; 578aa372e3fSPaul Mullowney 
upTriFactor->csrMat->num_cols = A->cmap->n; 579aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = a->nz; 580aa372e3fSPaul Mullowney 581aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 582aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 583aa372e3fSPaul Mullowney 584aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 585aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 586aa372e3fSPaul Mullowney 587aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 588aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); 589aa372e3fSPaul Mullowney 590aa372e3fSPaul Mullowney /* perform the solve analysis */ 591aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 592aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 593aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 594aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUSP(stat); 595aa372e3fSPaul Mullowney 596aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 597aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 598aa372e3fSPaul Mullowney 599aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 600aa372e3fSPaul Mullowney loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 601aa372e3fSPaul Mullowney 602aa372e3fSPaul Mullowney /* Create the matrix description */ 603aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSP(stat); 604aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 605aa372e3fSPaul Mullowney stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat); 606aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat); 607aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSP(stat); 608aa372e3fSPaul Mullowney 609aa372e3fSPaul Mullowney /* Create the solve analysis information */ 610aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUSP(stat); 611aa372e3fSPaul Mullowney 612aa372e3fSPaul Mullowney /* set the operation */ 613aa372e3fSPaul Mullowney loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; 614aa372e3fSPaul Mullowney 615aa372e3fSPaul Mullowney /* set the matrix */ 616aa372e3fSPaul Mullowney loTriFactor->csrMat = new CsrMatrix; 617aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows = A->rmap->n; 618aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols = A->cmap->n; 619aa372e3fSPaul Mullowney loTriFactor->csrMat->num_entries = a->nz; 620aa372e3fSPaul Mullowney 621aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 622aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 623aa372e3fSPaul Mullowney 624aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices = 
new THRUSTINTARRAY32(a->nz); 625aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 626aa372e3fSPaul Mullowney 627aa372e3fSPaul Mullowney loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 628aa372e3fSPaul Mullowney loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); 629aa372e3fSPaul Mullowney 630aa372e3fSPaul Mullowney /* perform the solve analysis */ 631aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, 632aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, 633aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), 634aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUSP(stat); 635aa372e3fSPaul Mullowney 636aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 637aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; 638087f3262SPaul Mullowney 639087f3262SPaul Mullowney A->valid_GPU_matrix = PETSC_CUSP_BOTH; 640087f3262SPaul Mullowney ierr = cudaFreeHost(AiUp);CHKERRCUSP(ierr); 641087f3262SPaul Mullowney ierr = cudaFreeHost(AjUp);CHKERRCUSP(ierr); 642087f3262SPaul Mullowney ierr = cudaFreeHost(AAUp);CHKERRCUSP(ierr); 643087f3262SPaul Mullowney ierr = cudaFreeHost(AALo);CHKERRCUSP(ierr); 644087f3262SPaul Mullowney } catch(char *ex) { 645087f3262SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 646087f3262SPaul Mullowney } 647087f3262SPaul Mullowney } 648087f3262SPaul Mullowney PetscFunctionReturn(0); 649087f3262SPaul Mullowney } 650087f3262SPaul Mullowney 651087f3262SPaul Mullowney #undef __FUNCT__ 652087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" 653087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) 6549ae82921SPaul Mullowney { 
6559ae82921SPaul Mullowney PetscErrorCode ierr; 656087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 657087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 658087f3262SPaul Mullowney IS ip = a->row; 659087f3262SPaul Mullowney const PetscInt *rip; 660087f3262SPaul Mullowney PetscBool perm_identity; 661087f3262SPaul Mullowney PetscInt n = A->rmap->n; 662087f3262SPaul Mullowney 663087f3262SPaul Mullowney PetscFunctionBegin; 664087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); 665aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 666aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 667aa372e3fSPaul Mullowney cusparseTriFactors->nnz=(a->nz-n)*2 + n; 668aa372e3fSPaul Mullowney 669087f3262SPaul Mullowney /*lower triangular indices */ 670087f3262SPaul Mullowney ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); 671087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 672087f3262SPaul Mullowney if (!perm_identity) { 673aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 674aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(rip, rip+n); 675aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 676aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(rip, rip+n); 677087f3262SPaul Mullowney } 678087f3262SPaul Mullowney ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); 679087f3262SPaul Mullowney PetscFunctionReturn(0); 680087f3262SPaul Mullowney } 681087f3262SPaul Mullowney 682087f3262SPaul Mullowney #undef __FUNCT__ 6839ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" 6846fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 6859ae82921SPaul Mullowney { 6869ae82921SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 6879ae82921SPaul Mullowney IS 
isrow = b->row,iscol = b->col; 6889ae82921SPaul Mullowney PetscBool row_identity,col_identity; 689b175d8bbSPaul Mullowney PetscErrorCode ierr; 6909ae82921SPaul Mullowney 6919ae82921SPaul Mullowney PetscFunctionBegin; 6929ae82921SPaul Mullowney ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 693e057df02SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 6949ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 6959ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 696bda325fcSPaul Mullowney if (row_identity && col_identity) { 697bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 698bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 699bda325fcSPaul Mullowney } else { 700bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 701bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 702bda325fcSPaul Mullowney } 7038dc1d2a3SPaul Mullowney 704e057df02SPaul Mullowney /* get the triangular factors */ 705087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 7069ae82921SPaul Mullowney PetscFunctionReturn(0); 7079ae82921SPaul Mullowney } 7089ae82921SPaul Mullowney 709087f3262SPaul Mullowney #undef __FUNCT__ 710087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" 711087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 712087f3262SPaul Mullowney { 713087f3262SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 714087f3262SPaul Mullowney IS ip = b->row; 715087f3262SPaul Mullowney PetscBool perm_identity; 716b175d8bbSPaul Mullowney PetscErrorCode ierr; 717087f3262SPaul Mullowney 718087f3262SPaul Mullowney PetscFunctionBegin; 719087f3262SPaul Mullowney ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 720087f3262SPaul Mullowney 
721087f3262SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 722087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 723087f3262SPaul Mullowney if (perm_identity) { 724087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 725087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 726087f3262SPaul Mullowney } else { 727087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 728087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 729087f3262SPaul Mullowney } 730087f3262SPaul Mullowney 731087f3262SPaul Mullowney /* get the triangular factors */ 732087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 733087f3262SPaul Mullowney PetscFunctionReturn(0); 734087f3262SPaul Mullowney } 7359ae82921SPaul Mullowney 736bda325fcSPaul Mullowney #undef __FUNCT__ 737bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve" 738b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) 739bda325fcSPaul Mullowney { 740bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 741aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 742aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 743aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 744aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 745bda325fcSPaul Mullowney cusparseStatus_t stat; 746aa372e3fSPaul Mullowney cusparseIndexBase_t 
indexBase; 747aa372e3fSPaul Mullowney cusparseMatrixType_t matrixType; 748aa372e3fSPaul Mullowney cusparseFillMode_t fillMode; 749aa372e3fSPaul Mullowney cusparseDiagType_t diagType; 750b175d8bbSPaul Mullowney 751bda325fcSPaul Mullowney PetscFunctionBegin; 752bda325fcSPaul Mullowney 753aa372e3fSPaul Mullowney /*********************************************/ 754aa372e3fSPaul Mullowney /* Now the Transpose of the Lower Tri Factor */ 755aa372e3fSPaul Mullowney /*********************************************/ 756aa372e3fSPaul Mullowney 757aa372e3fSPaul Mullowney /* allocate space for the transpose of the lower triangular factor */ 758aa372e3fSPaul Mullowney loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 759aa372e3fSPaul Mullowney 760aa372e3fSPaul Mullowney /* set the matrix descriptors of the lower triangular factor */ 761aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(loTriFactor->descr); 762aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(loTriFactor->descr); 763aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 
764aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 765aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(loTriFactor->descr); 766aa372e3fSPaul Mullowney 767aa372e3fSPaul Mullowney /* Create the matrix description */ 768aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSP(stat); 769aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSP(stat); 770aa372e3fSPaul Mullowney stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSP(stat); 771aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSP(stat); 772aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSP(stat); 773aa372e3fSPaul Mullowney 774aa372e3fSPaul Mullowney /* Create the solve analysis information */ 775aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUSP(stat); 776aa372e3fSPaul Mullowney 777aa372e3fSPaul Mullowney /* set the operation */ 778aa372e3fSPaul Mullowney loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 779aa372e3fSPaul Mullowney 780aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the lower triangular factor*/ 781aa372e3fSPaul Mullowney loTriFactorT->csrMat = new CsrMatrix; 782aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows; 783aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols; 784aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; 785aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1); 786aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries); 787aa372e3fSPaul Mullowney loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries); 788aa372e3fSPaul Mullowney 
789aa372e3fSPaul Mullowney /* compute the transpose of the lower triangular factor, i.e. the CSC */ 790aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, 791aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, 792aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 793aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 794aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 795aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 796aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 797aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 798aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 799aa372e3fSPaul Mullowney 800aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 801aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, 802aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, 803aa372e3fSPaul Mullowney loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), 804aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), 805aa372e3fSPaul Mullowney loTriFactorT->solveInfo);CHKERRCUSP(stat); 806aa372e3fSPaul Mullowney 807aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 808aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; 809aa372e3fSPaul Mullowney 810aa372e3fSPaul Mullowney /*********************************************/ 811aa372e3fSPaul Mullowney /* Now the Transpose of the Upper Tri Factor */ 812aa372e3fSPaul Mullowney /*********************************************/ 813aa372e3fSPaul Mullowney 814aa372e3fSPaul Mullowney /* allocate space for the transpose of the upper triangular factor */ 815aa372e3fSPaul Mullowney upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 816aa372e3fSPaul Mullowney 817aa372e3fSPaul Mullowney /* set the matrix descriptors of the upper triangular factor */ 818aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(upTriFactor->descr); 819aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(upTriFactor->descr); 820aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 821aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 822aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(upTriFactor->descr); 823aa372e3fSPaul Mullowney 824aa372e3fSPaul Mullowney /* Create the matrix description */ 825aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSP(stat); 826aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSP(stat); 827aa372e3fSPaul Mullowney stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSP(stat); 828aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSP(stat); 829aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSP(stat); 830aa372e3fSPaul Mullowney 831aa372e3fSPaul Mullowney /* Create the solve analysis information */ 832aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUSP(stat); 833aa372e3fSPaul Mullowney 834aa372e3fSPaul Mullowney /* 
set the operation */ 835aa372e3fSPaul Mullowney upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 836aa372e3fSPaul Mullowney 837aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the upper triangular factor*/ 838aa372e3fSPaul Mullowney upTriFactorT->csrMat = new CsrMatrix; 839aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows; 840aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols; 841aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; 842aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1); 843aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries); 844aa372e3fSPaul Mullowney upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries); 845aa372e3fSPaul Mullowney 846aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ 847aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, 848aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, 849aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 850aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 851aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 852aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 853aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 854aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 855aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 856aa372e3fSPaul Mullowney 857aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 858aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, 859aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, 860aa372e3fSPaul Mullowney upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), 861aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), 862aa372e3fSPaul Mullowney upTriFactorT->solveInfo);CHKERRCUSP(stat); 863aa372e3fSPaul Mullowney 864aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 865aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; 866bda325fcSPaul Mullowney PetscFunctionReturn(0); 867bda325fcSPaul Mullowney } 868bda325fcSPaul Mullowney 869bda325fcSPaul Mullowney #undef __FUNCT__ 870bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult" 871b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A) 872bda325fcSPaul Mullowney { 873aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 874aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 875aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 876bda325fcSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 877bda325fcSPaul Mullowney cusparseStatus_t stat; 878aa372e3fSPaul Mullowney cusparseIndexBase_t indexBase; 879b06137fdSPaul Mullowney cudaError_t err; 880b175d8bbSPaul Mullowney 881bda325fcSPaul Mullowney PetscFunctionBegin; 882aa372e3fSPaul Mullowney 883aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 884aa372e3fSPaul Mullowney matstructT = new Mat_SeqAIJCUSPARSEMultStruct; 885aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSP(stat); 886aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 887aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSP(stat); 888aa372e3fSPaul Mullowney stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSP(stat); 889aa372e3fSPaul Mullowney 890b06137fdSPaul Mullowney /* set alpha and beta */ 891b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUSP(err); 892b06137fdSPaul Mullowney err = 
cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 893b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUSP(err); 894b06137fdSPaul Mullowney err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 895b06137fdSPaul Mullowney stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat); 896b06137fdSPaul Mullowney 897aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 898aa372e3fSPaul Mullowney CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; 899aa372e3fSPaul Mullowney CsrMatrix *matrixT= new CsrMatrix; 900aa372e3fSPaul Mullowney matrixT->num_rows = A->rmap->n; 901aa372e3fSPaul Mullowney matrixT->num_cols = A->cmap->n; 902aa372e3fSPaul Mullowney matrixT->num_entries = a->nz; 903aa372e3fSPaul Mullowney matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 904aa372e3fSPaul Mullowney matrixT->column_indices = new THRUSTINTARRAY32(a->nz); 905aa372e3fSPaul Mullowney matrixT->values = new THRUSTARRAY(a->nz); 906aa372e3fSPaul Mullowney 907aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ 908aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 909aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows, 910aa372e3fSPaul Mullowney matrix->num_cols, matrix->num_entries, 911aa372e3fSPaul Mullowney matrix->values->data().get(), 912aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 913aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 914aa372e3fSPaul Mullowney matrixT->values->data().get(), 915aa372e3fSPaul Mullowney matrixT->column_indices->data().get(), 916aa372e3fSPaul Mullowney matrixT->row_offsets->data().get(), 917aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 918aa372e3fSPaul Mullowney 919aa372e3fSPaul Mullowney /* assign the pointer */ 920aa372e3fSPaul Mullowney matstructT->mat = matrixT; 921aa372e3fSPaul Mullowney 922aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 9232692e278SPaul Mullowney #if CUDA_VERSION>=5000 924aa372e3fSPaul Mullowney /* First convert HYB to CSR */ 925aa372e3fSPaul Mullowney CsrMatrix *temp= new CsrMatrix; 926aa372e3fSPaul Mullowney temp->num_rows = A->rmap->n; 927aa372e3fSPaul Mullowney temp->num_cols = A->cmap->n; 928aa372e3fSPaul Mullowney temp->num_entries = a->nz; 929aa372e3fSPaul Mullowney temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 930aa372e3fSPaul Mullowney temp->column_indices = new THRUSTINTARRAY32(a->nz); 931aa372e3fSPaul Mullowney temp->values = new THRUSTARRAY(a->nz); 932aa372e3fSPaul Mullowney 9332692e278SPaul Mullowney 934aa372e3fSPaul Mullowney stat = cusparse_hyb2csr(cusparsestruct->handle, 935aa372e3fSPaul Mullowney matstruct->descr, (cusparseHybMat_t)matstruct->mat, 936aa372e3fSPaul Mullowney temp->values->data().get(), 937aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 938aa372e3fSPaul Mullowney temp->column_indices->data().get());CHKERRCUSP(stat); 939aa372e3fSPaul Mullowney 940aa372e3fSPaul 
Mullowney /* Next, convert CSR to CSC (i.e. the matrix transpose) */ 941aa372e3fSPaul Mullowney CsrMatrix *tempT= new CsrMatrix; 942aa372e3fSPaul Mullowney tempT->num_rows = A->rmap->n; 943aa372e3fSPaul Mullowney tempT->num_cols = A->cmap->n; 944aa372e3fSPaul Mullowney tempT->num_entries = a->nz; 945aa372e3fSPaul Mullowney tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 946aa372e3fSPaul Mullowney tempT->column_indices = new THRUSTINTARRAY32(a->nz); 947aa372e3fSPaul Mullowney tempT->values = new THRUSTARRAY(a->nz); 948aa372e3fSPaul Mullowney 949aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, 950aa372e3fSPaul Mullowney temp->num_cols, temp->num_entries, 951aa372e3fSPaul Mullowney temp->values->data().get(), 952aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 953aa372e3fSPaul Mullowney temp->column_indices->data().get(), 954aa372e3fSPaul Mullowney tempT->values->data().get(), 955aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 956aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 957aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 958aa372e3fSPaul Mullowney 959aa372e3fSPaul Mullowney /* Last, convert CSC to HYB */ 960aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 961aa372e3fSPaul Mullowney stat = cusparseCreateHybMat(&hybMat);CHKERRCUSP(stat); 962aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
963aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 964aa372e3fSPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, 965aa372e3fSPaul Mullowney matstructT->descr, tempT->values->data().get(), 966aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 967aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 968aa372e3fSPaul Mullowney hybMat, 0, partition);CHKERRCUSP(stat); 969aa372e3fSPaul Mullowney 970aa372e3fSPaul Mullowney /* assign the pointer */ 971aa372e3fSPaul Mullowney matstructT->mat = hybMat; 972aa372e3fSPaul Mullowney 973aa372e3fSPaul Mullowney /* delete temporaries */ 974aa372e3fSPaul Mullowney if (tempT) { 975aa372e3fSPaul Mullowney if (tempT->values) delete (THRUSTARRAY*) tempT->values; 976aa372e3fSPaul Mullowney if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; 977aa372e3fSPaul Mullowney if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; 978aa372e3fSPaul Mullowney delete (CsrMatrix*) tempT; 979087f3262SPaul Mullowney } 980aa372e3fSPaul Mullowney if (temp) { 981aa372e3fSPaul Mullowney if (temp->values) delete (THRUSTARRAY*) temp->values; 982aa372e3fSPaul Mullowney if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; 983aa372e3fSPaul Mullowney if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; 984aa372e3fSPaul Mullowney delete (CsrMatrix*) temp; 985aa372e3fSPaul Mullowney } 9862692e278SPaul Mullowney #else 9872692e278SPaul Mullowney SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later."); 9882692e278SPaul Mullowney #endif 989aa372e3fSPaul Mullowney } 990aa372e3fSPaul Mullowney /* assign the compressed row indices */ 991aa372e3fSPaul Mullowney matstructT->cprowIndices = new THRUSTINTARRAY; 992aa372e3fSPaul Mullowney 993aa372e3fSPaul Mullowney /* assign the pointer */ 994aa372e3fSPaul 
Mullowney ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; 995bda325fcSPaul Mullowney PetscFunctionReturn(0); 996bda325fcSPaul Mullowney } 997bda325fcSPaul Mullowney 998bda325fcSPaul Mullowney #undef __FUNCT__ 999bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE" 10006fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 1001bda325fcSPaul Mullowney { 1002bda325fcSPaul Mullowney CUSPARRAY *xGPU, *bGPU; 1003bda325fcSPaul Mullowney cusparseStatus_t stat; 1004bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1005aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1006aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1007aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1008b175d8bbSPaul Mullowney PetscErrorCode ierr; 1009bda325fcSPaul Mullowney 1010bda325fcSPaul Mullowney PetscFunctionBegin; 1011aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... 
on the fly */ 1012aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1013bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1014aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1015aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1016bda325fcSPaul Mullowney } 1017bda325fcSPaul Mullowney 1018bda325fcSPaul Mullowney /* Get the GPU pointers */ 1019bda325fcSPaul Mullowney ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr); 1020bda325fcSPaul Mullowney ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr); 1021bda325fcSPaul Mullowney 1022aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1023aa372e3fSPaul Mullowney thrust::copy(thrust::make_permutation_iterator(bGPU->begin(), cusparseTriFactors->rpermIndices->begin()), 1024aa372e3fSPaul Mullowney thrust::make_permutation_iterator(bGPU->end(), cusparseTriFactors->rpermIndices->end()), 1025aa372e3fSPaul Mullowney xGPU->begin()); 1026aa372e3fSPaul Mullowney 1027aa372e3fSPaul Mullowney /* First, solve U */ 1028aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1029aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1030aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1031aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1032aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1033aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1034aa372e3fSPaul Mullowney xGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat); 1035aa372e3fSPaul Mullowney 1036aa372e3fSPaul Mullowney /* Then, solve L */ 1037aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1038aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 
1039aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1040aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1041aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1042aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1043aa372e3fSPaul Mullowney tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat); 1044aa372e3fSPaul Mullowney 1045aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1046aa372e3fSPaul Mullowney thrust::copy(thrust::make_permutation_iterator(xGPU->begin(), cusparseTriFactors->cpermIndices->begin()), 1047aa372e3fSPaul Mullowney thrust::make_permutation_iterator(xGPU->end(), cusparseTriFactors->cpermIndices->end()), 1048aa372e3fSPaul Mullowney tempGPU->begin()); 1049aa372e3fSPaul Mullowney 1050aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. */ 1051aa372e3fSPaul Mullowney thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU->begin()); 1052bda325fcSPaul Mullowney 1053bda325fcSPaul Mullowney /* restore */ 1054bda325fcSPaul Mullowney ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr); 1055bda325fcSPaul Mullowney ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr); 1056bda325fcSPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1057087f3262SPaul Mullowney 1058aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1059bda325fcSPaul Mullowney PetscFunctionReturn(0); 1060bda325fcSPaul Mullowney } 1061bda325fcSPaul Mullowney 1062bda325fcSPaul Mullowney #undef __FUNCT__ 1063bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" 10646fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 1065bda325fcSPaul Mullowney { 1066bda325fcSPaul Mullowney CUSPARRAY *xGPU,*bGPU; 1067bda325fcSPaul Mullowney cusparseStatus_t stat; 1068bda325fcSPaul Mullowney 
Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1069aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1070aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1071aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1072b175d8bbSPaul Mullowney PetscErrorCode ierr; 1073bda325fcSPaul Mullowney 1074bda325fcSPaul Mullowney PetscFunctionBegin; 1075aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... on the fly */ 1076aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1077bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1078aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1079aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1080bda325fcSPaul Mullowney } 1081bda325fcSPaul Mullowney 1082bda325fcSPaul Mullowney /* Get the GPU pointers */ 1083bda325fcSPaul Mullowney ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr); 1084bda325fcSPaul Mullowney ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr); 1085bda325fcSPaul Mullowney 1086aa372e3fSPaul Mullowney /* First, solve U */ 1087aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1088aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1089aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1090aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1091aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1092aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1093aa372e3fSPaul Mullowney 
bGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat); 1094aa372e3fSPaul Mullowney 1095aa372e3fSPaul Mullowney /* Then, solve L */ 1096aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1097aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1098aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1099aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1100aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1101aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1102aa372e3fSPaul Mullowney tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat); 1103bda325fcSPaul Mullowney 1104bda325fcSPaul Mullowney /* restore */ 1105bda325fcSPaul Mullowney ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr); 1106bda325fcSPaul Mullowney ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr); 1107bda325fcSPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1108aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1109bda325fcSPaul Mullowney PetscFunctionReturn(0); 1110bda325fcSPaul Mullowney } 1111bda325fcSPaul Mullowney 11129ae82921SPaul Mullowney #undef __FUNCT__ 11139ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" 11146fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 11159ae82921SPaul Mullowney { 1116bda325fcSPaul Mullowney CUSPARRAY *xGPU,*bGPU; 11179ae82921SPaul Mullowney cusparseStatus_t stat; 11189ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1119aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1120aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1121aa372e3fSPaul 
Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1122b175d8bbSPaul Mullowney PetscErrorCode ierr; 1123ebc8f436SDominic Meiser VecType t; 1124ebc8f436SDominic Meiser PetscBool flg; 11259ae82921SPaul Mullowney 11269ae82921SPaul Mullowney PetscFunctionBegin; 1127ebc8f436SDominic Meiser ierr = VecGetType(bb,&t);CHKERRQ(ierr); 1128ebc8f436SDominic Meiser ierr = PetscStrcmp(t,VECSEQCUSP,&flg);CHKERRQ(ierr); 1129ebc8f436SDominic Meiser if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUSP); 1130ebc8f436SDominic Meiser ierr = VecGetType(xx,&t);CHKERRQ(ierr); 1131ebc8f436SDominic Meiser ierr = PetscStrcmp(t,VECSEQCUSP,&flg);CHKERRQ(ierr); 1132ebc8f436SDominic Meiser if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). Can only deal with %s\n.",t,VECSEQCUSP); 1133ebc8f436SDominic Meiser 1134e057df02SPaul Mullowney /* Get the GPU pointers */ 11359ae82921SPaul Mullowney ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr); 11369ae82921SPaul Mullowney ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr); 11379ae82921SPaul Mullowney 1138aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1139aa372e3fSPaul Mullowney thrust::copy(thrust::make_permutation_iterator(bGPU->begin(), cusparseTriFactors->rpermIndices->begin()), 1140aa372e3fSPaul Mullowney thrust::make_permutation_iterator(bGPU->end(), cusparseTriFactors->rpermIndices->end()), 1141aa372e3fSPaul Mullowney xGPU->begin()); 1142aa372e3fSPaul Mullowney 1143aa372e3fSPaul Mullowney /* Next, solve L */ 1144aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1145aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1146aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1147aa372e3fSPaul Mullowney 
loTriFactor->csrMat->row_offsets->data().get(), 1148aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1149aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1150aa372e3fSPaul Mullowney xGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat); 1151aa372e3fSPaul Mullowney 1152aa372e3fSPaul Mullowney /* Then, solve U */ 1153aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1154aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1155aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1156aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1157aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1158aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1159aa372e3fSPaul Mullowney tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat); 1160aa372e3fSPaul Mullowney 1161aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1162aa372e3fSPaul Mullowney thrust::copy(thrust::make_permutation_iterator(xGPU->begin(), cusparseTriFactors->cpermIndices->begin()), 1163aa372e3fSPaul Mullowney thrust::make_permutation_iterator(xGPU->end(), cusparseTriFactors->cpermIndices->end()), 1164aa372e3fSPaul Mullowney tempGPU->begin()); 1165aa372e3fSPaul Mullowney 1166aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. 
*/ 1167aa372e3fSPaul Mullowney thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU->begin()); 11689ae82921SPaul Mullowney 11699ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr); 11709ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr); 11719ae82921SPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1172aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 11739ae82921SPaul Mullowney PetscFunctionReturn(0); 11749ae82921SPaul Mullowney } 11759ae82921SPaul Mullowney 11769ae82921SPaul Mullowney #undef __FUNCT__ 11779ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" 11786fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 11799ae82921SPaul Mullowney { 1180bda325fcSPaul Mullowney CUSPARRAY *xGPU,*bGPU; 11819ae82921SPaul Mullowney cusparseStatus_t stat; 11829ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1183aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1184aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1185aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1186b175d8bbSPaul Mullowney PetscErrorCode ierr; 11879ae82921SPaul Mullowney 11889ae82921SPaul Mullowney PetscFunctionBegin; 1189e057df02SPaul Mullowney /* Get the GPU pointers */ 11909ae82921SPaul Mullowney ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr); 11919ae82921SPaul Mullowney ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr); 11929ae82921SPaul Mullowney 1193aa372e3fSPaul Mullowney /* First, solve L */ 1194aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1195aa372e3fSPaul 
Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1196aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1197aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1198aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1199aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1200aa372e3fSPaul Mullowney bGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat); 1201aa372e3fSPaul Mullowney 1202aa372e3fSPaul Mullowney /* Next, solve U */ 1203aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1204aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1205aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1206aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1207aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1208aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1209aa372e3fSPaul Mullowney tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat); 12109ae82921SPaul Mullowney 12119ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr); 12129ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr); 12139ae82921SPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1214aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 12159ae82921SPaul Mullowney PetscFunctionReturn(0); 12169ae82921SPaul Mullowney } 12179ae82921SPaul Mullowney 12189ae82921SPaul Mullowney #undef __FUNCT__ 1219e057df02SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" 12206fa9248bSJed Brown static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) 12219ae82921SPaul Mullowney { 12229ae82921SPaul Mullowney 1223aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1224aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = 
(Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 12259ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 12269ae82921SPaul Mullowney PetscInt m = A->rmap->n,*ii,*ridx; 12279ae82921SPaul Mullowney PetscErrorCode ierr; 1228aa372e3fSPaul Mullowney cusparseStatus_t stat; 1229b06137fdSPaul Mullowney cudaError_t err; 12309ae82921SPaul Mullowney 12319ae82921SPaul Mullowney PetscFunctionBegin; 12329ae82921SPaul Mullowney if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) { 12339ae82921SPaul Mullowney ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 1234ce814652SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); 12359ae82921SPaul Mullowney try { 1236aa372e3fSPaul Mullowney cusparsestruct->nonzerorow=0; 1237aa372e3fSPaul Mullowney for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); 12389ae82921SPaul Mullowney 12399ae82921SPaul Mullowney if (a->compressedrow.use) { 12409ae82921SPaul Mullowney m = a->compressedrow.nrows; 12419ae82921SPaul Mullowney ii = a->compressedrow.i; 12429ae82921SPaul Mullowney ridx = a->compressedrow.rindex; 12439ae82921SPaul Mullowney } else { 1244b06137fdSPaul Mullowney /* Forcing compressed row on the GPU */ 12459ae82921SPaul Mullowney int k=0; 1246854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); 1247854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); 12489ae82921SPaul Mullowney ii[0]=0; 12499ae82921SPaul Mullowney for (int j = 0; j<m; j++) { 12509ae82921SPaul Mullowney if ((a->i[j+1]-a->i[j])>0) { 12519ae82921SPaul Mullowney ii[k] = a->i[j]; 12529ae82921SPaul Mullowney ridx[k]= j; 12539ae82921SPaul Mullowney k++; 12549ae82921SPaul Mullowney } 12559ae82921SPaul Mullowney } 1256aa372e3fSPaul Mullowney ii[cusparsestruct->nonzerorow] = a->nz; 1257aa372e3fSPaul Mullowney m = cusparsestruct->nonzerorow; 12589ae82921SPaul Mullowney } 
12599ae82921SPaul Mullowney 1260aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 1261aa372e3fSPaul Mullowney matstruct = new Mat_SeqAIJCUSPARSEMultStruct; 1262aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSP(stat); 1263aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 1264aa372e3fSPaul Mullowney stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSP(stat); 12659ae82921SPaul Mullowney 1266b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUSP(err); 1267b06137fdSPaul Mullowney err = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 1268b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUSP(err); 1269b06137fdSPaul Mullowney err = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 1270b06137fdSPaul Mullowney stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat); 1271b06137fdSPaul Mullowney 1272aa372e3fSPaul Mullowney /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ 1273aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1274aa372e3fSPaul Mullowney /* set the matrix */ 1275aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1276a65300a6SPaul Mullowney matrix->num_rows = m; 1277aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1278aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1279a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1280a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 12819ae82921SPaul Mullowney 1282aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1283aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 
1284aa372e3fSPaul Mullowney 1285aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1286aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1287aa372e3fSPaul Mullowney 1288aa372e3fSPaul Mullowney /* assign the pointer */ 1289aa372e3fSPaul Mullowney matstruct->mat = matrix; 1290aa372e3fSPaul Mullowney 1291aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 12922692e278SPaul Mullowney #if CUDA_VERSION>=4020 1293aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1294a65300a6SPaul Mullowney matrix->num_rows = m; 1295aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1296aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1297a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1298a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 1299aa372e3fSPaul Mullowney 1300aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1301aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1302aa372e3fSPaul Mullowney 1303aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1304aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1305aa372e3fSPaul Mullowney 1306aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 1307aa372e3fSPaul Mullowney stat = cusparseCreateHybMat(&hybMat);CHKERRCUSP(stat); 1308aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
1309aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 1310a65300a6SPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, 1311aa372e3fSPaul Mullowney matstruct->descr, matrix->values->data().get(), 1312aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 1313aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 1314aa372e3fSPaul Mullowney hybMat, 0, partition);CHKERRCUSP(stat); 1315aa372e3fSPaul Mullowney /* assign the pointer */ 1316aa372e3fSPaul Mullowney matstruct->mat = hybMat; 1317aa372e3fSPaul Mullowney 1318aa372e3fSPaul Mullowney if (matrix) { 1319aa372e3fSPaul Mullowney if (matrix->values) delete (THRUSTARRAY*)matrix->values; 1320aa372e3fSPaul Mullowney if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; 1321aa372e3fSPaul Mullowney if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; 1322aa372e3fSPaul Mullowney delete (CsrMatrix*)matrix; 1323087f3262SPaul Mullowney } 13242692e278SPaul Mullowney #endif 1325087f3262SPaul Mullowney } 1326ca45077fSPaul Mullowney 1327aa372e3fSPaul Mullowney /* assign the compressed row indices */ 1328aa372e3fSPaul Mullowney matstruct->cprowIndices = new THRUSTINTARRAY(m); 1329aa372e3fSPaul Mullowney matstruct->cprowIndices->assign(ridx,ridx+m); 1330aa372e3fSPaul Mullowney 1331aa372e3fSPaul Mullowney /* assign the pointer */ 1332aa372e3fSPaul Mullowney cusparsestruct->mat = matstruct; 1333aa372e3fSPaul Mullowney 13349ae82921SPaul Mullowney if (!a->compressedrow.use) { 13359ae82921SPaul Mullowney ierr = PetscFree(ii);CHKERRQ(ierr); 13369ae82921SPaul Mullowney ierr = PetscFree(ridx);CHKERRQ(ierr); 13379ae82921SPaul Mullowney } 1338aa372e3fSPaul Mullowney cusparsestruct->workVector = new THRUSTARRAY; 1339aa372e3fSPaul Mullowney cusparsestruct->workVector->resize(m); 13409ae82921SPaul Mullowney } catch(char *ex) { 13419ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", 
ex); 13429ae82921SPaul Mullowney } 13439ae82921SPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 13442205254eSKarl Rupp 13459ae82921SPaul Mullowney A->valid_GPU_matrix = PETSC_CUSP_BOTH; 13462205254eSKarl Rupp 13479ae82921SPaul Mullowney ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 13489ae82921SPaul Mullowney } 13499ae82921SPaul Mullowney PetscFunctionReturn(0); 13509ae82921SPaul Mullowney } 13519ae82921SPaul Mullowney 13529ae82921SPaul Mullowney #undef __FUNCT__ 13532a7a6963SBarry Smith #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" 13542a7a6963SBarry Smith static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) 13559ae82921SPaul Mullowney { 13569ae82921SPaul Mullowney PetscErrorCode ierr; 135733d57670SJed Brown PetscInt rbs,cbs; 13589ae82921SPaul Mullowney 13599ae82921SPaul Mullowney PetscFunctionBegin; 136033d57670SJed Brown ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); 13619ae82921SPaul Mullowney if (right) { 1362ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); 13639ae82921SPaul Mullowney ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 136433d57670SJed Brown ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); 13659ae82921SPaul Mullowney ierr = VecSetType(*right,VECSEQCUSP);CHKERRQ(ierr); 13669ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); 13679ae82921SPaul Mullowney } 13689ae82921SPaul Mullowney if (left) { 1369ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); 13709ae82921SPaul Mullowney ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 137133d57670SJed Brown ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); 13729ae82921SPaul Mullowney ierr = VecSetType(*left,VECSEQCUSP);CHKERRQ(ierr); 13739ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); 13749ae82921SPaul Mullowney } 13759ae82921SPaul 
Mullowney PetscFunctionReturn(0); 13769ae82921SPaul Mullowney } 13779ae82921SPaul Mullowney 1378aa372e3fSPaul Mullowney struct VecCUSPPlusEquals 1379aa372e3fSPaul Mullowney { 1380aa372e3fSPaul Mullowney template <typename Tuple> 1381aa372e3fSPaul Mullowney __host__ __device__ 1382aa372e3fSPaul Mullowney void operator()(Tuple t) 1383aa372e3fSPaul Mullowney { 1384aa372e3fSPaul Mullowney thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); 1385aa372e3fSPaul Mullowney } 1386aa372e3fSPaul Mullowney }; 1387aa372e3fSPaul Mullowney 13889ae82921SPaul Mullowney #undef __FUNCT__ 13899ae82921SPaul Mullowney #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" 13906fa9248bSJed Brown static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 13919ae82921SPaul Mullowney { 13929ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1393aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1394aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1395bda325fcSPaul Mullowney CUSPARRAY *xarray,*yarray; 1396b175d8bbSPaul Mullowney PetscErrorCode ierr; 1397aa372e3fSPaul Mullowney cusparseStatus_t stat; 13989ae82921SPaul Mullowney 13999ae82921SPaul Mullowney PetscFunctionBegin; 1400e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1401e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 14029ae82921SPaul Mullowney ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr); 14039ae82921SPaul Mullowney ierr = VecCUSPGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1404aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1405aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1406aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1407aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, mat->num_entries, 
1408b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), 1409b06137fdSPaul Mullowney mat->column_indices->data().get(), xarray->data().get(), matstruct->beta, 1410aa372e3fSPaul Mullowney yarray->data().get());CHKERRCUSP(stat); 1411aa372e3fSPaul Mullowney } else { 14122692e278SPaul Mullowney #if CUDA_VERSION>=4020 1413aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1414aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1415b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1416b06137fdSPaul Mullowney xarray->data().get(), matstruct->beta, 1417aa372e3fSPaul Mullowney yarray->data().get());CHKERRCUSP(stat); 14182692e278SPaul Mullowney #endif 14199ae82921SPaul Mullowney } 14209ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 14219ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1422aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 14239ae82921SPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1424ca45077fSPaul Mullowney } 1425aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 14269ae82921SPaul Mullowney PetscFunctionReturn(0); 14279ae82921SPaul Mullowney } 14289ae82921SPaul Mullowney 14299ae82921SPaul Mullowney #undef __FUNCT__ 1430ca45077fSPaul Mullowney #define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE" 14316fa9248bSJed Brown static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 1432ca45077fSPaul Mullowney { 1433ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1434aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1435aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1436bda325fcSPaul Mullowney CUSPARRAY *xarray,*yarray; 
1437b175d8bbSPaul Mullowney PetscErrorCode ierr; 1438aa372e3fSPaul Mullowney cusparseStatus_t stat; 1439ca45077fSPaul Mullowney 1440ca45077fSPaul Mullowney PetscFunctionBegin; 1441e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1442e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1443aa372e3fSPaul Mullowney if (!matstructT) { 1444bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1445aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1446bda325fcSPaul Mullowney } 1447ca45077fSPaul Mullowney ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1448ca45077fSPaul Mullowney ierr = VecCUSPGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1449aa372e3fSPaul Mullowney 1450aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1451aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1452aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1453aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, 1454b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1455aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1456b06137fdSPaul Mullowney mat->column_indices->data().get(), xarray->data().get(), matstructT->beta, 1457aa372e3fSPaul Mullowney yarray->data().get());CHKERRCUSP(stat); 1458aa372e3fSPaul Mullowney } else { 14592692e278SPaul Mullowney #if CUDA_VERSION>=4020 1460aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1461aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1462b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, hybMat, 1463b06137fdSPaul Mullowney xarray->data().get(), matstructT->beta, 1464aa372e3fSPaul Mullowney 
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE"
/* Computes zz = A*xx + yy on the GPU.  The product is formed into the
   compressed work vector (one entry per nonzero row) and then scattered
   into zz with a += via a thrust permutation iterator. */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_SeqAIJ                   *aij  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusp->mat;
  CUSPARRAY                    *xarray,*yarray,*zarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* No MatSeqAIJCUSPARSECopyToGPU() here: the copy happens in MatAssemblyEnd_SeqAIJCUSPARSE */
  try {
    /* seed the result with yy, then accumulate A*xx on top of it */
    ierr = VecCopy_SeqCUSP(yy,zz);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(yy,&yarray);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayWrite(zz,&zarray);CHKERRQ(ierr);

    /* multiply add */
    if (cusp->format==MAT_CUSPARSE_CSR) {
      CsrMatrix *csr = (CsrMatrix*)matstruct->mat;
      /* careful: the row count passed to the SpMV is the number of
         COMPRESSED rows in the matrix, which equals the size of workVector */
      stat = cusparse_csr_spmv(cusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                               csr->num_rows, csr->num_cols,
                               csr->num_entries, matstruct->alpha, matstruct->descr,
                               csr->values->data().get(), csr->row_offsets->data().get(),
                               csr->column_indices->data().get(), xarray->data().get(), matstruct->beta,
                               cusp->workVector->data().get());CHKERRCUSP(stat);
    } else {
#if CUDA_VERSION>=4020
      cusparseHybMat_t hyb = (cusparseHybMat_t)matstruct->mat;
      if (cusp->workVector->size()) {
        stat = cusparse_hyb_spmv(cusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 matstruct->alpha, matstruct->descr, hyb,
                                 xarray->data().get(), matstruct->beta,
                                 cusp->workVector->data().get());CHKERRCUSP(stat);
      }
#endif
    }

    /* scatter the data from the temporary into the full vector with a += operation */
    thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusp->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstruct->cprowIndices->begin()))),
                     thrust::make_zip_iterator(thrust::make_tuple(cusp->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstruct->cprowIndices->begin()))) + cusp->workVector->size(),
                     VecCUSPPlusEquals());
    ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUSPRestoreArrayRead(yy,&yarray);CHKERRQ(ierr);
    ierr = VecCUSPRestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);

  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  ierr = WaitForGPU();CHKERRCUSP(ierr);
  ierr = PetscLogFlops(2.0*aij->nz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE"
/* Computes zz = A^T*xx + yy on the GPU, using the cached explicit
   transpose (built on first use) and the same compressed-row work-vector
   scatter as MatMultAdd_SeqAIJCUSPARSE. */
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_SeqAIJ                   *aij  = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusp = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusp->matTranspose;
  CUSPARRAY                    *xarray,*yarray,*zarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* No MatSeqAIJCUSPARSECopyToGPU() here: the copy happens in MatAssemblyEnd_SeqAIJCUSPARSE */
  if (!matstructT) {
    /* first transpose multiply: build and cache the transposed GPU matrix */
    ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
    matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusp->matTranspose;
  }

  try {
    /* seed the result with yy, then accumulate A^T*xx on top of it */
    ierr = VecCopy_SeqCUSP(yy,zz);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(yy,&yarray);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayWrite(zz,&zarray);CHKERRQ(ierr);

    /* multiply add with matrix transpose */
    if (cusp->format==MAT_CUSPARSE_CSR) {
      CsrMatrix *csr = (CsrMatrix*)matstructT->mat;
      /* careful: the row count passed to the SpMV is the number of
         COMPRESSED rows in the matrix, which equals the size of workVector */
      stat = cusparse_csr_spmv(cusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                               csr->num_rows, csr->num_cols,
                               csr->num_entries, matstructT->alpha, matstructT->descr,
                               csr->values->data().get(), csr->row_offsets->data().get(),
                               csr->column_indices->data().get(), xarray->data().get(), matstructT->beta,
                               cusp->workVector->data().get());CHKERRCUSP(stat);
    } else {
#if CUDA_VERSION>=4020
      cusparseHybMat_t hyb = (cusparseHybMat_t)matstructT->mat;
      if (cusp->workVector->size()) {
        stat = cusparse_hyb_spmv(cusp->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 matstructT->alpha, matstructT->descr, hyb,
                                 xarray->data().get(), matstructT->beta,
                                 cusp->workVector->data().get());CHKERRCUSP(stat);
      }
#endif
    }

    /* scatter the data from the temporary into the full vector with a += operation */
    thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusp->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstructT->cprowIndices->begin()))),
                     thrust::make_zip_iterator(thrust::make_tuple(cusp->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstructT->cprowIndices->begin()))) + cusp->workVector->size(),
                     VecCUSPPlusEquals());

    ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUSPRestoreArrayRead(yy,&yarray);CHKERRQ(ierr);
    ierr = VecCUSPRestoreArrayWrite(zz,&zarray);CHKERRQ(ierr);

  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  ierr = WaitForGPU();CHKERRCUSP(ierr);
  ierr = PetscLogFlops(2.0*aij->nz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1592ca45077fSPaul Mullowney ierr = VecCUSPRestoreArrayRead(yy,&yarray);CHKERRQ(ierr); 1593ca45077fSPaul Mullowney ierr = VecCUSPRestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 1594ca45077fSPaul Mullowney 1595ca45077fSPaul Mullowney } catch(char *ex) { 1596ca45077fSPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 1597ca45077fSPaul Mullowney } 1598ca45077fSPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1599ca45077fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 1600ca45077fSPaul Mullowney PetscFunctionReturn(0); 1601ca45077fSPaul Mullowney } 1602ca45077fSPaul Mullowney 1603ca45077fSPaul Mullowney #undef __FUNCT__ 16049ae82921SPaul Mullowney #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" 16056fa9248bSJed Brown static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) 16069ae82921SPaul Mullowney { 16079ae82921SPaul Mullowney PetscErrorCode ierr; 16086e111a19SKarl Rupp 16099ae82921SPaul Mullowney PetscFunctionBegin; 16109ae82921SPaul Mullowney ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); 1611bc3f50f2SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 1612e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); 1613bc3f50f2SPaul Mullowney } 16149ae82921SPaul Mullowney if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); 1615bbf3fe20SPaul Mullowney A->ops->mult = MatMult_SeqAIJCUSPARSE; 1616bbf3fe20SPaul Mullowney A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; 1617bbf3fe20SPaul Mullowney A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; 1618bbf3fe20SPaul Mullowney A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; 16199ae82921SPaul Mullowney PetscFunctionReturn(0); 16209ae82921SPaul Mullowney } 16219ae82921SPaul Mullowney 16229ae82921SPaul Mullowney /* --------------------------------------------------------------------------------*/ 16239ae82921SPaul Mullowney #undef __FUNCT__ 16249ae82921SPaul 
#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqAIJCUSPARSE"
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz). By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE"
/* Releases the GPU-side data (either the multiply structures or the
   triangular factors, depending on factortype) and then falls through to
   the host-side AIJ destroy. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype==MAT_FACTOR_NONE) {
    /* nothing to free on the GPU if the matrix was never copied there */
    if (A->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
      ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
    }
  } else {
    ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCUSPARSE"
/* Type constructor: builds the host AIJ matrix, attaches the appropriate
   GPU container (multiply structures or triangular-factor holders), and
   installs the CUSPARSE operation table. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle=0;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  if (B->factortype==MAT_FACTOR_NONE) {
    /* you cannot check the inode.use flag here since the matrix was just created;
       now build the GPU matrix data structure */
    Mat_SeqAIJCUSPARSE *cusp = new Mat_SeqAIJCUSPARSE;
    B->spptr           = cusp;
    cusp->mat          = 0;
    cusp->matTranspose = 0;
    cusp->workVector   = 0;
    cusp->format       = MAT_CUSPARSE_CSR;
    cusp->stream       = 0;
    cusp->handle       = 0;
    stat = cusparseCreate(&handle);CHKERRCUSP(stat);
    cusp->handle = handle;
  } else {
    /* NEXT, set the pointers to the triangular factors */
    Mat_SeqAIJCUSPARSETriFactors *tri = new Mat_SeqAIJCUSPARSETriFactors;
    B->spptr                     = tri;
    tri->loTriFactorPtr          = 0;
    tri->upTriFactorPtr          = 0;
    tri->loTriFactorPtrTranspose = 0;
    tri->upTriFactorPtrTranspose = 0;
    tri->rpermIndices            = 0;
    tri->cpermIndices            = 0;
    tri->workVector              = 0;
    tri->handle                  = 0;
    stat = cusparseCreate(&handle);CHKERRCUSP(stat);
    tri->handle = handle;
    tri->nnz    = 0;
  }

  B->ops->assemblyend      = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy          = MatDestroy_SeqAIJCUSPARSE;
  B->ops->getvecs          = MatCreateVecs_SeqAIJCUSPARSE;
  B->ops->setfromoptions   = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->mult             = MatMult_SeqAIJCUSPARSE;
  B->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
  B->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
  B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;

  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  B->valid_GPU_matrix = PETSC_CUSP_UNALLOCATED;

  ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*M
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on NVidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on NVidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
1769e057df02SPaul Mullowney 1770e057df02SPaul Mullowney Level: beginner 1771e057df02SPaul Mullowney 17728468deeeSKarl Rupp .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 1773e057df02SPaul Mullowney M*/ 17747f756511SDominic Meiser 177542c9c57cSBarry Smith PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*); 177642c9c57cSBarry Smith 17770f39cd5aSBarry Smith 177842c9c57cSBarry Smith #undef __FUNCT__ 177942c9c57cSBarry Smith #define __FUNCT__ "MatSolverPackageRegister_CUSPARSE" 178029b38603SBarry Smith PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void) 178142c9c57cSBarry Smith { 178242c9c57cSBarry Smith PetscErrorCode ierr; 178342c9c57cSBarry Smith 178442c9c57cSBarry Smith PetscFunctionBegin; 178542c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 178642c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 178742c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 178842c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 178942c9c57cSBarry Smith PetscFunctionReturn(0); 179042c9c57cSBarry Smith } 179129b38603SBarry Smith 179281e08676SBarry Smith 17937f756511SDominic Meiser #undef __FUNCT__ 17947f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy" 17957f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) 17967f756511SDominic Meiser { 17977f756511SDominic Meiser cusparseStatus_t stat; 17987f756511SDominic Meiser cusparseHandle_t 
handle; 17997f756511SDominic Meiser 18007f756511SDominic Meiser PetscFunctionBegin; 18017f756511SDominic Meiser if (*cusparsestruct) { 18027f756511SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format); 18037f756511SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format); 18047f756511SDominic Meiser delete (*cusparsestruct)->workVector; 18057f756511SDominic Meiser if (handle = (*cusparsestruct)->handle) { 18067f756511SDominic Meiser stat = cusparseDestroy(handle);CHKERRCUSP(stat); 18077f756511SDominic Meiser } 18087f756511SDominic Meiser delete *cusparsestruct; 18097f756511SDominic Meiser *cusparsestruct = 0; 18107f756511SDominic Meiser } 18117f756511SDominic Meiser PetscFunctionReturn(0); 18127f756511SDominic Meiser } 18137f756511SDominic Meiser 18147f756511SDominic Meiser #undef __FUNCT__ 18157f756511SDominic Meiser #define __FUNCT__ "CsrMatrix_Destroy" 18167f756511SDominic Meiser static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) 18177f756511SDominic Meiser { 18187f756511SDominic Meiser PetscFunctionBegin; 18197f756511SDominic Meiser if (*mat) { 18207f756511SDominic Meiser delete (*mat)->values; 18217f756511SDominic Meiser delete (*mat)->column_indices; 18227f756511SDominic Meiser delete (*mat)->row_offsets; 18237f756511SDominic Meiser delete *mat; 18247f756511SDominic Meiser *mat = 0; 18257f756511SDominic Meiser } 18267f756511SDominic Meiser PetscFunctionReturn(0); 18277f756511SDominic Meiser } 18287f756511SDominic Meiser 18297f756511SDominic Meiser #undef __FUNCT__ 18307f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy" 18317f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) 18327f756511SDominic Meiser { 18337f756511SDominic Meiser cusparseStatus_t stat; 18347f756511SDominic Meiser PetscErrorCode ierr; 18357f756511SDominic Meiser 
18367f756511SDominic Meiser PetscFunctionBegin; 18377f756511SDominic Meiser if (*trifactor) { 18387f756511SDominic Meiser if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSP(stat); } 18397f756511SDominic Meiser if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUSP(stat); } 18407f756511SDominic Meiser ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr); 18417f756511SDominic Meiser delete *trifactor; 18427f756511SDominic Meiser *trifactor = 0; 18437f756511SDominic Meiser } 18447f756511SDominic Meiser PetscFunctionReturn(0); 18457f756511SDominic Meiser } 18467f756511SDominic Meiser 18477f756511SDominic Meiser #undef __FUNCT__ 18487f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy" 18497f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format) 18507f756511SDominic Meiser { 18517f756511SDominic Meiser CsrMatrix *mat; 18527f756511SDominic Meiser cusparseStatus_t stat; 18537f756511SDominic Meiser cudaError_t err; 18547f756511SDominic Meiser 18557f756511SDominic Meiser PetscFunctionBegin; 18567f756511SDominic Meiser if (*matstruct) { 18577f756511SDominic Meiser if ((*matstruct)->mat) { 18587f756511SDominic Meiser if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) { 18597f756511SDominic Meiser cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; 18607f756511SDominic Meiser stat = cusparseDestroyHybMat(hybMat);CHKERRCUSP(stat); 18617f756511SDominic Meiser } else { 18627f756511SDominic Meiser mat = (CsrMatrix*)(*matstruct)->mat; 18637f756511SDominic Meiser CsrMatrix_Destroy(&mat); 18647f756511SDominic Meiser } 18657f756511SDominic Meiser } 18667f756511SDominic Meiser if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSP(stat); } 18677f756511SDominic Meiser delete (*matstruct)->cprowIndices; 
18687f756511SDominic Meiser if ((*matstruct)->alpha) { err=cudaFree((*matstruct)->alpha);CHKERRCUSP(err); } 18697f756511SDominic Meiser if ((*matstruct)->beta) { err=cudaFree((*matstruct)->beta);CHKERRCUSP(err); } 18707f756511SDominic Meiser delete *matstruct; 18717f756511SDominic Meiser *matstruct = 0; 18727f756511SDominic Meiser } 18737f756511SDominic Meiser PetscFunctionReturn(0); 18747f756511SDominic Meiser } 18757f756511SDominic Meiser 18767f756511SDominic Meiser #undef __FUNCT__ 18777f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactors_Destroy" 18787f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors) 18797f756511SDominic Meiser { 18807f756511SDominic Meiser cusparseHandle_t handle; 18817f756511SDominic Meiser cusparseStatus_t stat; 18827f756511SDominic Meiser 18837f756511SDominic Meiser PetscFunctionBegin; 18847f756511SDominic Meiser if (*trifactors) { 18857f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr); 18867f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr); 18877f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose); 18887f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose); 18897f756511SDominic Meiser delete (*trifactors)->rpermIndices; 18907f756511SDominic Meiser delete (*trifactors)->cpermIndices; 18917f756511SDominic Meiser delete (*trifactors)->workVector; 18927f756511SDominic Meiser if (handle = (*trifactors)->handle) { 18937f756511SDominic Meiser stat = cusparseDestroy(handle);CHKERRCUSP(stat); 18947f756511SDominic Meiser } 18957f756511SDominic Meiser delete *trifactors; 18967f756511SDominic Meiser *trifactors = 0; 18977f756511SDominic Meiser } 18987f756511SDominic Meiser PetscFunctionReturn(0); 18997f756511SDominic Meiser } 19007f756511SDominic Meiser 1901