/*
  Defines the basic matrix operations for the AIJ (compressed row)
  matrix storage format using the CUSPARSE library,
*/
#define PETSC_SKIP_SPINLOCK

#include <petscconf.h>
#include <../src/mat/impls/aij/seq/aij.h>          /*I "petscmat.h" I*/
#include <../src/mat/impls/sbaij/seq/sbaij.h>
#include <../src/vec/vec/impls/dvecimpl.h>
#include <petsc/private/vecimpl.h>
#undef VecType
#include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h>

/* Option strings for -mat_cusparse_storage_format; the trailing entries are the
   enum name and prefix required by PetscOptionsEnum(). */
const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0};

static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*);
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);

static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*);
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*);

static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptions *PetscOptionsObject,Mat);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);

static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetStream"
/*
   MatCUSPARSESetStream - records the given CUDA stream in the matrix's
   Mat_SeqAIJCUSPARSE structure and attaches it to the cuSPARSE handle,
   so subsequent cuSPARSE calls on this matrix run on that stream.

   Input Parameters:
+  A      - matrix of type SEQAIJCUSPARSE (A->spptr must be a Mat_SeqAIJCUSPARSE)
-  stream - the CUDA stream to use
*/
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  cusparsestruct->stream = stream;
  stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUSP(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetHandle"
/*
   MatCUSPARSESetHandle - replaces the matrix's cuSPARSE handle with a
   caller-supplied one, destroying any handle the matrix already owned,
   and sets the pointer mode of the new handle to device pointers.

   Input Parameters:
+  A      - matrix of type SEQAIJCUSPARSE (A->spptr must be a Mat_SeqAIJCUSPARSE)
-  handle - the cuSPARSE handle to install
*/
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (cusparsestruct->handle) {
    /* Braces added: originally CHKERRCUSP(stat) was a separate statement after an
       unbraced if, so it evaluated an uninitialized `stat` whenever no previous
       handle existed. */
    stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUSP(stat);
  }
  cusparsestruct->handle = handle;
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSEClearHandle"
/*
   MatCUSPARSEClearHandle - drops the matrix's reference to its cuSPARSE handle
   without destroying it.

   NOTE(review): the handle is only zeroed, never destroyed here — presumably
   ownership rests with whoever installed it via MatCUSPARSESetHandle(); confirm
   against callers before changing this.
*/
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscFunctionBegin;
  if (cusparsestruct->handle) cusparsestruct->handle = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse"
/* Reports MATSOLVERCUSPARSE as the solver package for factors created here. */
PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(0);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
91087f3262SPaul Mullowney on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported 92087f3262SPaul Mullowney algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer 93087f3262SPaul Mullowney performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the 94087f3262SPaul Mullowney CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these 95087f3262SPaul Mullowney algorithms are not recommended. This class does NOT support direct solver operations. 96c708e6cdSJed Brown 979ae82921SPaul Mullowney Level: beginner 98c708e6cdSJed Brown 99c708e6cdSJed Brown .seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 100c708e6cdSJed Brown M*/ 1019ae82921SPaul Mullowney 1029ae82921SPaul Mullowney #undef __FUNCT__ 10342c9c57cSBarry Smith #define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse" 10442c9c57cSBarry Smith PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) 1059ae82921SPaul Mullowney { 1069ae82921SPaul Mullowney PetscErrorCode ierr; 107bc3f50f2SPaul Mullowney PetscInt n = A->rmap->n; 1089ae82921SPaul Mullowney 1099ae82921SPaul Mullowney PetscFunctionBegin; 110bc3f50f2SPaul Mullowney ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); 111404133a2SPaul Mullowney (*B)->factortype = ftype; 112bc3f50f2SPaul Mullowney ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); 1139ae82921SPaul Mullowney ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); 1142205254eSKarl Rupp 115087f3262SPaul Mullowney if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { 11633d57670SJed Brown ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); 1179ae82921SPaul Mullowney (*B)->ops->ilufactorsymbolic = 
MatILUFactorSymbolic_SeqAIJCUSPARSE; 1189ae82921SPaul Mullowney (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; 119087f3262SPaul Mullowney } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { 120087f3262SPaul Mullowney (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; 121087f3262SPaul Mullowney (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; 1229ae82921SPaul Mullowney } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); 123bc3f50f2SPaul Mullowney 124fa03d054SJed Brown ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); 12562a20339SJed Brown ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr); 1269ae82921SPaul Mullowney PetscFunctionReturn(0); 1279ae82921SPaul Mullowney } 1289ae82921SPaul Mullowney 1299ae82921SPaul Mullowney #undef __FUNCT__ 130e057df02SPaul Mullowney #define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE" 131bc3f50f2SPaul Mullowney PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) 132ca45077fSPaul Mullowney { 133aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1346e111a19SKarl Rupp 135ca45077fSPaul Mullowney PetscFunctionBegin; 1362692e278SPaul Mullowney #if CUDA_VERSION>=4020 137ca45077fSPaul Mullowney switch (op) { 138e057df02SPaul Mullowney case MAT_CUSPARSE_MULT: 139aa372e3fSPaul Mullowney cusparsestruct->format = format; 140ca45077fSPaul Mullowney break; 141e057df02SPaul Mullowney case MAT_CUSPARSE_ALL: 142aa372e3fSPaul Mullowney cusparsestruct->format = format; 143ca45077fSPaul Mullowney break; 144ca45077fSPaul Mullowney default: 14536d62e41SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. 
MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); 146ca45077fSPaul Mullowney } 1472692e278SPaul Mullowney #else 1482692e278SPaul Mullowney if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) 1492692e278SPaul Mullowney SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later."); 1502692e278SPaul Mullowney #endif 151ca45077fSPaul Mullowney PetscFunctionReturn(0); 152ca45077fSPaul Mullowney } 1539ae82921SPaul Mullowney 154e057df02SPaul Mullowney /*@ 155e057df02SPaul Mullowney MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular 156e057df02SPaul Mullowney operation. Only the MatMult operation can use different GPU storage formats 157aa372e3fSPaul Mullowney for MPIAIJCUSPARSE matrices. 158e057df02SPaul Mullowney Not Collective 159e057df02SPaul Mullowney 160e057df02SPaul Mullowney Input Parameters: 1618468deeeSKarl Rupp + A - Matrix of type SEQAIJCUSPARSE 16236d62e41SPaul Mullowney . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. 1632692e278SPaul Mullowney - format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. 
The latter two require CUDA 4.2) 164e057df02SPaul Mullowney 165e057df02SPaul Mullowney Output Parameter: 166e057df02SPaul Mullowney 167e057df02SPaul Mullowney Level: intermediate 168e057df02SPaul Mullowney 1698468deeeSKarl Rupp .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 170e057df02SPaul Mullowney @*/ 171e057df02SPaul Mullowney #undef __FUNCT__ 172e057df02SPaul Mullowney #define __FUNCT__ "MatCUSPARSESetFormat" 173e057df02SPaul Mullowney PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) 174e057df02SPaul Mullowney { 175e057df02SPaul Mullowney PetscErrorCode ierr; 1766e111a19SKarl Rupp 177e057df02SPaul Mullowney PetscFunctionBegin; 178e057df02SPaul Mullowney PetscValidHeaderSpecific(A, MAT_CLASSID,1); 179e057df02SPaul Mullowney ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); 180e057df02SPaul Mullowney PetscFunctionReturn(0); 181e057df02SPaul Mullowney } 182e057df02SPaul Mullowney 1839ae82921SPaul Mullowney #undef __FUNCT__ 1849ae82921SPaul Mullowney #define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE" 1858c34d3f5SBarry Smith static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptions *PetscOptionsObject,Mat A) 1869ae82921SPaul Mullowney { 1879ae82921SPaul Mullowney PetscErrorCode ierr; 188e057df02SPaul Mullowney MatCUSPARSEStorageFormat format; 1899ae82921SPaul Mullowney PetscBool flg; 190a183c035SDominic Meiser Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1916e111a19SKarl Rupp 1929ae82921SPaul Mullowney PetscFunctionBegin; 193e55864a3SBarry Smith ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); 194e057df02SPaul Mullowney ierr = PetscObjectOptionsBegin((PetscObject)A); 1959ae82921SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 196e057df02SPaul Mullowney ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of 
(seq)aijcusparse gpu matrices for SpMV", 197a183c035SDominic Meiser "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); 198e057df02SPaul Mullowney if (flg) { 199e057df02SPaul Mullowney ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr); 200045c96e1SPaul Mullowney } 2019ae82921SPaul Mullowney } 2024c87dfd4SPaul Mullowney ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", 203a183c035SDominic Meiser "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); 2044c87dfd4SPaul Mullowney if (flg) { 2054c87dfd4SPaul Mullowney ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); 2064c87dfd4SPaul Mullowney } 2079ae82921SPaul Mullowney ierr = PetscOptionsEnd();CHKERRQ(ierr); 2089ae82921SPaul Mullowney PetscFunctionReturn(0); 2099ae82921SPaul Mullowney 2109ae82921SPaul Mullowney } 2119ae82921SPaul Mullowney 2129ae82921SPaul Mullowney #undef __FUNCT__ 2139ae82921SPaul Mullowney #define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE" 2146fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) 2159ae82921SPaul Mullowney { 2169ae82921SPaul Mullowney PetscErrorCode ierr; 2179ae82921SPaul Mullowney 2189ae82921SPaul Mullowney PetscFunctionBegin; 2199ae82921SPaul Mullowney ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); 2209ae82921SPaul Mullowney B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; 2219ae82921SPaul Mullowney PetscFunctionReturn(0); 2229ae82921SPaul Mullowney } 2239ae82921SPaul Mullowney 2249ae82921SPaul Mullowney #undef __FUNCT__ 2259ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE" 2266fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS 
iscol,const MatFactorInfo *info) 2279ae82921SPaul Mullowney { 2289ae82921SPaul Mullowney PetscErrorCode ierr; 2299ae82921SPaul Mullowney 2309ae82921SPaul Mullowney PetscFunctionBegin; 2319ae82921SPaul Mullowney ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); 2329ae82921SPaul Mullowney B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; 2339ae82921SPaul Mullowney PetscFunctionReturn(0); 2349ae82921SPaul Mullowney } 2359ae82921SPaul Mullowney 2369ae82921SPaul Mullowney #undef __FUNCT__ 237087f3262SPaul Mullowney #define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE" 238087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) 239087f3262SPaul Mullowney { 240087f3262SPaul Mullowney PetscErrorCode ierr; 241087f3262SPaul Mullowney 242087f3262SPaul Mullowney PetscFunctionBegin; 243087f3262SPaul Mullowney ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); 244087f3262SPaul Mullowney B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; 245087f3262SPaul Mullowney PetscFunctionReturn(0); 246087f3262SPaul Mullowney } 247087f3262SPaul Mullowney 248087f3262SPaul Mullowney #undef __FUNCT__ 249087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE" 250087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) 251087f3262SPaul Mullowney { 252087f3262SPaul Mullowney PetscErrorCode ierr; 253087f3262SPaul Mullowney 254087f3262SPaul Mullowney PetscFunctionBegin; 255087f3262SPaul Mullowney ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); 256087f3262SPaul Mullowney B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; 257087f3262SPaul Mullowney PetscFunctionReturn(0); 258087f3262SPaul Mullowney } 259087f3262SPaul Mullowney 260087f3262SPaul Mullowney #undef __FUNCT__ 261087f3262SPaul Mullowney #define __FUNCT__ 
/*
   MatSeqAIJCUSPARSEBuildILULowerTriMatrix - extracts the unit-diagonal lower
   triangular factor L from the host ILU factorization stored in the Mat_SeqAIJ
   structure, uploads it as a CSR matrix on the GPU, and runs the cuSPARSE
   triangular-solve analysis on it.  Runs only when the GPU copy is missing or
   stale (valid_GPU_matrix is UNALLOCATED or CPU).

   The Mat_SeqAIJ factor stores L and U interleaved; L's row i holds the
   off-diagonal entries from a->i/a->j/a->a plus an implicit unit diagonal,
   which is appended explicitly here.
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscScalar                       *AALo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];

      /* Allocate (pinned) host space for the lower triangular matrix */
      ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUSP(ierr);
      ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUSP(ierr);

      /* Fill the lower triangular matrix: row 0 is just the unit diagonal */
      AiLo[0]  = (PetscInt) 0;
      AiLo[n]  = nzLower;
      AjLo[0]  = (PetscInt) 0;
      AALo[0]  = (MatScalar) 1.0;
      v        = aa;
      vi       = aj;
      offset   = 1;
      rowOffset= 1;
      for (i=1; i<n; i++) {
        nz = ai[i+1] - ai[i];
        /* additional 1 for the term on the diagonal */
        AiLo[i]    = rowOffset;
        rowOffset += nz+1;

        ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);

        /* append the implicit unit diagonal entry at the end of the row */
        offset      += nz;
        AjLo[offset] = (PetscInt) i;
        AALo[offset] = (MatScalar) 1.0;
        offset      += 1;

        v  += nz;
        vi += nz;
      }

      /* allocate space for the triangular factor information */
      loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: zero-based, triangular, lower, unit diagonal */
      stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSP(stat);
      stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat);
      stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat);
      stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUSP(stat);
      stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSP(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUSP(stat);

      /* set the operation */
      loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix; assign() copies the pinned host arrays to the device */
      loTriFactor->csrMat = new CsrMatrix;
      loTriFactor->csrMat->num_rows    = n;
      loTriFactor->csrMat->num_cols    = n;
      loTriFactor->csrMat->num_entries = nzLower;

      loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

      loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
      loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

      loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
      loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                               loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                               loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                               loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUSP(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

      /* NOTE(review): if any CHKERRCUSP above returns early, these pinned
         buffers leak — pre-existing behavior, kept unchanged. */
      ierr = cudaFreeHost(AiLo);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AjLo);CHKERRCUSP(ierr);
      ierr = cudaFreeHost(AALo);CHKERRCUSP(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix"
/*
   MatSeqAIJCUSPARSEBuildILUUpperTriMatrix - extracts the upper triangular
   factor U (non-unit diagonal, stored via a->diag in reverse row order) from
   the host ILU factorization, uploads it as CSR on the GPU, and runs the
   cuSPARSE triangular-solve analysis.  See the lower-triangular builder above.
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a                  = (Mat_SeqAIJ*)A->data;
  PetscInt                          n                   = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor        = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
a->j,*adiag = a->diag,*vi; 3749ae82921SPaul Mullowney const MatScalar *aa = a->a,*v; 3759ae82921SPaul Mullowney PetscInt *AiUp, *AjUp; 3769ae82921SPaul Mullowney PetscScalar *AAUp; 3779ae82921SPaul Mullowney PetscInt i,nz, nzUpper, offset; 3789ae82921SPaul Mullowney PetscErrorCode ierr; 3799ae82921SPaul Mullowney 3809ae82921SPaul Mullowney PetscFunctionBegin; 3819ae82921SPaul Mullowney if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) { 3829ae82921SPaul Mullowney try { 3839ae82921SPaul Mullowney /* next, figure out the number of nonzeros in the upper triangular matrix. */ 3849ae82921SPaul Mullowney nzUpper = adiag[0]-adiag[n]; 3859ae82921SPaul Mullowney 3869ae82921SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 3879ae82921SPaul Mullowney ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr); 3889ae82921SPaul Mullowney ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUSP(ierr); 3899ae82921SPaul Mullowney ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr); 3909ae82921SPaul Mullowney 3919ae82921SPaul Mullowney /* Fill the upper triangular matrix */ 3929ae82921SPaul Mullowney AiUp[0]=(PetscInt) 0; 3939ae82921SPaul Mullowney AiUp[n]=nzUpper; 3949ae82921SPaul Mullowney offset = nzUpper; 3959ae82921SPaul Mullowney for (i=n-1; i>=0; i--) { 3969ae82921SPaul Mullowney v = aa + adiag[i+1] + 1; 3979ae82921SPaul Mullowney vi = aj + adiag[i+1] + 1; 3989ae82921SPaul Mullowney 399e057df02SPaul Mullowney /* number of elements NOT on the diagonal */ 4009ae82921SPaul Mullowney nz = adiag[i] - adiag[i+1]-1; 4019ae82921SPaul Mullowney 402e057df02SPaul Mullowney /* decrement the offset */ 4039ae82921SPaul Mullowney offset -= (nz+1); 4049ae82921SPaul Mullowney 405e057df02SPaul Mullowney /* first, set the diagonal elements */ 4069ae82921SPaul Mullowney AjUp[offset] = (PetscInt) i; 4079ae82921SPaul Mullowney AAUp[offset] = 1./v[nz]; 4089ae82921SPaul 
Mullowney AiUp[i] = AiUp[i+1] - (nz+1); 4099ae82921SPaul Mullowney 4109ae82921SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); 4119ae82921SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 4129ae82921SPaul Mullowney } 4132205254eSKarl Rupp 414aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 415aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 4162205254eSKarl Rupp 417aa372e3fSPaul Mullowney /* Create the matrix description */ 418aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSP(stat); 419aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 420aa372e3fSPaul Mullowney stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat); 421aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat); 422aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSP(stat); 423aa372e3fSPaul Mullowney 424aa372e3fSPaul Mullowney /* Create the solve analysis information */ 425aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUSP(stat); 426aa372e3fSPaul Mullowney 427aa372e3fSPaul Mullowney /* set the operation */ 428aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 429aa372e3fSPaul Mullowney 430aa372e3fSPaul Mullowney /* set the matrix */ 431aa372e3fSPaul Mullowney upTriFactor->csrMat = new CsrMatrix; 432aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = n; 433aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols = n; 434aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = nzUpper; 435aa372e3fSPaul Mullowney 436aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); 
437aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); 438aa372e3fSPaul Mullowney 439aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); 440aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); 441aa372e3fSPaul Mullowney 442aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); 443aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); 444aa372e3fSPaul Mullowney 445aa372e3fSPaul Mullowney /* perform the solve analysis */ 446aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 447aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 448aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 449aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUSP(stat); 450aa372e3fSPaul Mullowney 451aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 452aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 4532205254eSKarl Rupp 4549ae82921SPaul Mullowney ierr = cudaFreeHost(AiUp);CHKERRCUSP(ierr); 4559ae82921SPaul Mullowney ierr = cudaFreeHost(AjUp);CHKERRCUSP(ierr); 4569ae82921SPaul Mullowney ierr = cudaFreeHost(AAUp);CHKERRCUSP(ierr); 4579ae82921SPaul Mullowney } catch(char *ex) { 4589ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 4599ae82921SPaul Mullowney } 4609ae82921SPaul Mullowney } 4619ae82921SPaul Mullowney PetscFunctionReturn(0); 4629ae82921SPaul Mullowney } 4639ae82921SPaul Mullowney 4649ae82921SPaul Mullowney #undef __FUNCT__ 465087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU" 466087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) 4679ae82921SPaul Mullowney { 4689ae82921SPaul Mullowney PetscErrorCode ierr; 4699ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 4709ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 4719ae82921SPaul Mullowney IS isrow = a->row,iscol = a->icol; 4729ae82921SPaul Mullowney PetscBool row_identity,col_identity; 4739ae82921SPaul Mullowney const PetscInt *r,*c; 4749ae82921SPaul Mullowney PetscInt n = A->rmap->n; 4759ae82921SPaul Mullowney 4769ae82921SPaul Mullowney PetscFunctionBegin; 477087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr); 478087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr); 4792205254eSKarl Rupp 480aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 481aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 482aa372e3fSPaul Mullowney cusparseTriFactors->nnz=a->nz; 4839ae82921SPaul Mullowney 4849ae82921SPaul Mullowney A->valid_GPU_matrix = PETSC_CUSP_BOTH; 485e057df02SPaul Mullowney /*lower triangular indices */ 
4869ae82921SPaul Mullowney ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); 4879ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 4882205254eSKarl Rupp if (!row_identity) { 489aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 490aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(r, r+n); 4912205254eSKarl Rupp } 4929ae82921SPaul Mullowney ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); 4939ae82921SPaul Mullowney 494e057df02SPaul Mullowney /*upper triangular indices */ 4959ae82921SPaul Mullowney ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); 4969ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 4972205254eSKarl Rupp if (!col_identity) { 498aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 499aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(c, c+n); 5002205254eSKarl Rupp } 5019ae82921SPaul Mullowney ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); 5029ae82921SPaul Mullowney PetscFunctionReturn(0); 5039ae82921SPaul Mullowney } 5049ae82921SPaul Mullowney 5059ae82921SPaul Mullowney #undef __FUNCT__ 506087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices" 507087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) 508087f3262SPaul Mullowney { 509087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 510087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 511aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 512aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 513087f3262SPaul Mullowney cusparseStatus_t stat; 514087f3262SPaul Mullowney PetscErrorCode ierr; 515087f3262SPaul Mullowney PetscInt *AiUp, *AjUp; 
516087f3262SPaul Mullowney PetscScalar *AAUp; 517087f3262SPaul Mullowney PetscScalar *AALo; 518087f3262SPaul Mullowney PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; 519087f3262SPaul Mullowney Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; 520087f3262SPaul Mullowney const PetscInt *ai = b->i,*aj = b->j,*vj; 521087f3262SPaul Mullowney const MatScalar *aa = b->a,*v; 522087f3262SPaul Mullowney 523087f3262SPaul Mullowney PetscFunctionBegin; 524087f3262SPaul Mullowney if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) { 525087f3262SPaul Mullowney try { 526087f3262SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 527087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUSP(ierr); 528087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUSP(ierr); 529087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr); 530087f3262SPaul Mullowney ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUSP(ierr); 531087f3262SPaul Mullowney 532087f3262SPaul Mullowney /* Fill the upper triangular matrix */ 533087f3262SPaul Mullowney AiUp[0]=(PetscInt) 0; 534087f3262SPaul Mullowney AiUp[n]=nzUpper; 535087f3262SPaul Mullowney offset = 0; 536087f3262SPaul Mullowney for (i=0; i<n; i++) { 537087f3262SPaul Mullowney /* set the pointers */ 538087f3262SPaul Mullowney v = aa + ai[i]; 539087f3262SPaul Mullowney vj = aj + ai[i]; 540087f3262SPaul Mullowney nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ 541087f3262SPaul Mullowney 542087f3262SPaul Mullowney /* first, set the diagonal elements */ 543087f3262SPaul Mullowney AjUp[offset] = (PetscInt) i; 544087f3262SPaul Mullowney AAUp[offset] = 1.0/v[nz]; 545087f3262SPaul Mullowney AiUp[i] = offset; 546087f3262SPaul Mullowney AALo[offset] = 1.0/v[nz]; 547087f3262SPaul Mullowney 548087f3262SPaul Mullowney offset+=1; 549087f3262SPaul Mullowney if 
(nz>0) { 550087f3262SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); 551087f3262SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 552087f3262SPaul Mullowney for (j=offset; j<offset+nz; j++) { 553087f3262SPaul Mullowney AAUp[j] = -AAUp[j]; 554087f3262SPaul Mullowney AALo[j] = AAUp[j]/v[nz]; 555087f3262SPaul Mullowney } 556087f3262SPaul Mullowney offset+=nz; 557087f3262SPaul Mullowney } 558087f3262SPaul Mullowney } 559087f3262SPaul Mullowney 560aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 561aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 562087f3262SPaul Mullowney 563aa372e3fSPaul Mullowney /* Create the matrix description */ 564aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUSP(stat); 565aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 566aa372e3fSPaul Mullowney stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat); 567aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat); 568aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUSP(stat); 569087f3262SPaul Mullowney 570aa372e3fSPaul Mullowney /* Create the solve analysis information */ 571aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUSP(stat); 572aa372e3fSPaul Mullowney 573aa372e3fSPaul Mullowney /* set the operation */ 574aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 575aa372e3fSPaul Mullowney 576aa372e3fSPaul Mullowney /* set the matrix */ 577aa372e3fSPaul Mullowney upTriFactor->csrMat = new CsrMatrix; 578aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = A->rmap->n; 579aa372e3fSPaul Mullowney 
upTriFactor->csrMat->num_cols = A->cmap->n; 580aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = a->nz; 581aa372e3fSPaul Mullowney 582aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 583aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 584aa372e3fSPaul Mullowney 585aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 586aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 587aa372e3fSPaul Mullowney 588aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 589aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); 590aa372e3fSPaul Mullowney 591aa372e3fSPaul Mullowney /* perform the solve analysis */ 592aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 593aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 594aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 595aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUSP(stat); 596aa372e3fSPaul Mullowney 597aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 598aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 599aa372e3fSPaul Mullowney 600aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 601aa372e3fSPaul Mullowney loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 602aa372e3fSPaul Mullowney 603aa372e3fSPaul Mullowney /* Create the matrix description */ 604aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUSP(stat); 605aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat); 606aa372e3fSPaul Mullowney stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUSP(stat); 607aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUSP(stat); 608aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUSP(stat); 609aa372e3fSPaul Mullowney 610aa372e3fSPaul Mullowney /* Create the solve analysis information */ 611aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUSP(stat); 612aa372e3fSPaul Mullowney 613aa372e3fSPaul Mullowney /* set the operation */ 614aa372e3fSPaul Mullowney loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; 615aa372e3fSPaul Mullowney 616aa372e3fSPaul Mullowney /* set the matrix */ 617aa372e3fSPaul Mullowney loTriFactor->csrMat = new CsrMatrix; 618aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows = A->rmap->n; 619aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols = A->cmap->n; 620aa372e3fSPaul Mullowney loTriFactor->csrMat->num_entries = a->nz; 621aa372e3fSPaul Mullowney 622aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 623aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 624aa372e3fSPaul Mullowney 625aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices = 
new THRUSTINTARRAY32(a->nz); 626aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 627aa372e3fSPaul Mullowney 628aa372e3fSPaul Mullowney loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 629aa372e3fSPaul Mullowney loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); 630aa372e3fSPaul Mullowney 631aa372e3fSPaul Mullowney /* perform the solve analysis */ 632aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, 633aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, 634aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), 635aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUSP(stat); 636aa372e3fSPaul Mullowney 637aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 638aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; 639087f3262SPaul Mullowney 640087f3262SPaul Mullowney A->valid_GPU_matrix = PETSC_CUSP_BOTH; 641087f3262SPaul Mullowney ierr = cudaFreeHost(AiUp);CHKERRCUSP(ierr); 642087f3262SPaul Mullowney ierr = cudaFreeHost(AjUp);CHKERRCUSP(ierr); 643087f3262SPaul Mullowney ierr = cudaFreeHost(AAUp);CHKERRCUSP(ierr); 644087f3262SPaul Mullowney ierr = cudaFreeHost(AALo);CHKERRCUSP(ierr); 645087f3262SPaul Mullowney } catch(char *ex) { 646087f3262SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 647087f3262SPaul Mullowney } 648087f3262SPaul Mullowney } 649087f3262SPaul Mullowney PetscFunctionReturn(0); 650087f3262SPaul Mullowney } 651087f3262SPaul Mullowney 652087f3262SPaul Mullowney #undef __FUNCT__ 653087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" 654087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) 6559ae82921SPaul Mullowney { 
6569ae82921SPaul Mullowney PetscErrorCode ierr; 657087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 658087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 659087f3262SPaul Mullowney IS ip = a->row; 660087f3262SPaul Mullowney const PetscInt *rip; 661087f3262SPaul Mullowney PetscBool perm_identity; 662087f3262SPaul Mullowney PetscInt n = A->rmap->n; 663087f3262SPaul Mullowney 664087f3262SPaul Mullowney PetscFunctionBegin; 665087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); 666aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 667aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 668aa372e3fSPaul Mullowney cusparseTriFactors->nnz=(a->nz-n)*2 + n; 669aa372e3fSPaul Mullowney 670087f3262SPaul Mullowney /*lower triangular indices */ 671087f3262SPaul Mullowney ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); 672087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 673087f3262SPaul Mullowney if (!perm_identity) { 674aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 675aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(rip, rip+n); 676aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 677aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(rip, rip+n); 678087f3262SPaul Mullowney } 679087f3262SPaul Mullowney ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); 680087f3262SPaul Mullowney PetscFunctionReturn(0); 681087f3262SPaul Mullowney } 682087f3262SPaul Mullowney 683087f3262SPaul Mullowney #undef __FUNCT__ 6849ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" 6856fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 6869ae82921SPaul Mullowney { 6879ae82921SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 6889ae82921SPaul Mullowney IS 
isrow = b->row,iscol = b->col; 6899ae82921SPaul Mullowney PetscBool row_identity,col_identity; 690b175d8bbSPaul Mullowney PetscErrorCode ierr; 6919ae82921SPaul Mullowney 6929ae82921SPaul Mullowney PetscFunctionBegin; 6939ae82921SPaul Mullowney ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 694e057df02SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 6959ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 6969ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 697bda325fcSPaul Mullowney if (row_identity && col_identity) { 698bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 699bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 700bda325fcSPaul Mullowney } else { 701bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 702bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 703bda325fcSPaul Mullowney } 7048dc1d2a3SPaul Mullowney 705e057df02SPaul Mullowney /* get the triangular factors */ 706087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 7079ae82921SPaul Mullowney PetscFunctionReturn(0); 7089ae82921SPaul Mullowney } 7099ae82921SPaul Mullowney 710087f3262SPaul Mullowney #undef __FUNCT__ 711087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" 712087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 713087f3262SPaul Mullowney { 714087f3262SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 715087f3262SPaul Mullowney IS ip = b->row; 716087f3262SPaul Mullowney PetscBool perm_identity; 717b175d8bbSPaul Mullowney PetscErrorCode ierr; 718087f3262SPaul Mullowney 719087f3262SPaul Mullowney PetscFunctionBegin; 720087f3262SPaul Mullowney ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 721087f3262SPaul Mullowney 
722087f3262SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 723087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 724087f3262SPaul Mullowney if (perm_identity) { 725087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 726087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 727087f3262SPaul Mullowney } else { 728087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 729087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 730087f3262SPaul Mullowney } 731087f3262SPaul Mullowney 732087f3262SPaul Mullowney /* get the triangular factors */ 733087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 734087f3262SPaul Mullowney PetscFunctionReturn(0); 735087f3262SPaul Mullowney } 7369ae82921SPaul Mullowney 737bda325fcSPaul Mullowney #undef __FUNCT__ 738bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve" 739b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) 740bda325fcSPaul Mullowney { 741bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 742aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 743aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 744aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 745aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 746bda325fcSPaul Mullowney cusparseStatus_t stat; 747aa372e3fSPaul Mullowney cusparseIndexBase_t 
indexBase; 748aa372e3fSPaul Mullowney cusparseMatrixType_t matrixType; 749aa372e3fSPaul Mullowney cusparseFillMode_t fillMode; 750aa372e3fSPaul Mullowney cusparseDiagType_t diagType; 751b175d8bbSPaul Mullowney 752bda325fcSPaul Mullowney PetscFunctionBegin; 753bda325fcSPaul Mullowney 754aa372e3fSPaul Mullowney /*********************************************/ 755aa372e3fSPaul Mullowney /* Now the Transpose of the Lower Tri Factor */ 756aa372e3fSPaul Mullowney /*********************************************/ 757aa372e3fSPaul Mullowney 758aa372e3fSPaul Mullowney /* allocate space for the transpose of the lower triangular factor */ 759aa372e3fSPaul Mullowney loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 760aa372e3fSPaul Mullowney 761aa372e3fSPaul Mullowney /* set the matrix descriptors of the lower triangular factor */ 762aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(loTriFactor->descr); 763aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(loTriFactor->descr); 764aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 
765aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 766aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(loTriFactor->descr); 767aa372e3fSPaul Mullowney 768aa372e3fSPaul Mullowney /* Create the matrix description */ 769aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUSP(stat); 770aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUSP(stat); 771aa372e3fSPaul Mullowney stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUSP(stat); 772aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUSP(stat); 773aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUSP(stat); 774aa372e3fSPaul Mullowney 775aa372e3fSPaul Mullowney /* Create the solve analysis information */ 776aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUSP(stat); 777aa372e3fSPaul Mullowney 778aa372e3fSPaul Mullowney /* set the operation */ 779aa372e3fSPaul Mullowney loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 780aa372e3fSPaul Mullowney 781aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the lower triangular factor*/ 782aa372e3fSPaul Mullowney loTriFactorT->csrMat = new CsrMatrix; 783aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows; 784aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols; 785aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; 786aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1); 787aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries); 788aa372e3fSPaul Mullowney loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries); 789aa372e3fSPaul Mullowney 
790aa372e3fSPaul Mullowney /* compute the transpose of the lower triangular factor, i.e. the CSC */ 791aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, 792aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, 793aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 794aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 795aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 796aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 797aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 798aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 799aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 800aa372e3fSPaul Mullowney 801aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 802aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, 803aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, 804aa372e3fSPaul Mullowney loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), 805aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), 806aa372e3fSPaul Mullowney loTriFactorT->solveInfo);CHKERRCUSP(stat); 807aa372e3fSPaul Mullowney 808aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 809aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; 810aa372e3fSPaul Mullowney 811aa372e3fSPaul Mullowney /*********************************************/ 812aa372e3fSPaul Mullowney /* Now the Transpose of the Upper Tri Factor */ 813aa372e3fSPaul Mullowney /*********************************************/ 814aa372e3fSPaul Mullowney 815aa372e3fSPaul Mullowney /* allocate space for the transpose of the upper triangular factor */ 816aa372e3fSPaul Mullowney upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 817aa372e3fSPaul Mullowney 818aa372e3fSPaul Mullowney /* set the matrix descriptors of the upper triangular factor */ 819aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(upTriFactor->descr); 820aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(upTriFactor->descr); 821aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 822aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 823aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(upTriFactor->descr); 824aa372e3fSPaul Mullowney 825aa372e3fSPaul Mullowney /* Create the matrix description */ 826aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUSP(stat); 827aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUSP(stat); 828aa372e3fSPaul Mullowney stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUSP(stat); 829aa372e3fSPaul Mullowney stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUSP(stat); 830aa372e3fSPaul Mullowney stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUSP(stat); 831aa372e3fSPaul Mullowney 832aa372e3fSPaul Mullowney /* Create the solve analysis information */ 833aa372e3fSPaul Mullowney stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUSP(stat); 834aa372e3fSPaul Mullowney 835aa372e3fSPaul Mullowney /* 
set the operation */ 836aa372e3fSPaul Mullowney upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 837aa372e3fSPaul Mullowney 838aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the upper triangular factor*/ 839aa372e3fSPaul Mullowney upTriFactorT->csrMat = new CsrMatrix; 840aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows; 841aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols; 842aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; 843aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1); 844aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries); 845aa372e3fSPaul Mullowney upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries); 846aa372e3fSPaul Mullowney 847aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ 848aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, 849aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, 850aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 851aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 852aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 853aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 854aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 855aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 856aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 857aa372e3fSPaul Mullowney 858aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 859aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, 860aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, 861aa372e3fSPaul Mullowney upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), 862aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), 863aa372e3fSPaul Mullowney upTriFactorT->solveInfo);CHKERRCUSP(stat); 864aa372e3fSPaul Mullowney 865aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? 
*/ 866aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; 867bda325fcSPaul Mullowney PetscFunctionReturn(0); 868bda325fcSPaul Mullowney } 869bda325fcSPaul Mullowney 870bda325fcSPaul Mullowney #undef __FUNCT__ 871bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult" 872b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A) 873bda325fcSPaul Mullowney { 874aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 875aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 876aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 877bda325fcSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 878bda325fcSPaul Mullowney cusparseStatus_t stat; 879aa372e3fSPaul Mullowney cusparseIndexBase_t indexBase; 880b06137fdSPaul Mullowney cudaError_t err; 881b175d8bbSPaul Mullowney 882bda325fcSPaul Mullowney PetscFunctionBegin; 883aa372e3fSPaul Mullowney 884aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 885aa372e3fSPaul Mullowney matstructT = new Mat_SeqAIJCUSPARSEMultStruct; 886aa372e3fSPaul Mullowney stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUSP(stat); 887aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 888aa372e3fSPaul Mullowney stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUSP(stat); 889aa372e3fSPaul Mullowney stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSP(stat); 890aa372e3fSPaul Mullowney 891b06137fdSPaul Mullowney /* set alpha and beta */ 892b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUSP(err); 893b06137fdSPaul Mullowney err = 
cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 894b06137fdSPaul Mullowney err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUSP(err); 895b06137fdSPaul Mullowney err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err); 896b06137fdSPaul Mullowney stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat); 897b06137fdSPaul Mullowney 898aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 899aa372e3fSPaul Mullowney CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; 900aa372e3fSPaul Mullowney CsrMatrix *matrixT= new CsrMatrix; 901aa372e3fSPaul Mullowney matrixT->num_rows = A->rmap->n; 902aa372e3fSPaul Mullowney matrixT->num_cols = A->cmap->n; 903aa372e3fSPaul Mullowney matrixT->num_entries = a->nz; 904aa372e3fSPaul Mullowney matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 905aa372e3fSPaul Mullowney matrixT->column_indices = new THRUSTINTARRAY32(a->nz); 906aa372e3fSPaul Mullowney matrixT->values = new THRUSTARRAY(a->nz); 907aa372e3fSPaul Mullowney 908aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ 909aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 910aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows, 911aa372e3fSPaul Mullowney matrix->num_cols, matrix->num_entries, 912aa372e3fSPaul Mullowney matrix->values->data().get(), 913aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 914aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 915aa372e3fSPaul Mullowney matrixT->values->data().get(), 916aa372e3fSPaul Mullowney matrixT->column_indices->data().get(), 917aa372e3fSPaul Mullowney matrixT->row_offsets->data().get(), 918aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 919aa372e3fSPaul Mullowney 920aa372e3fSPaul Mullowney /* assign the pointer */ 921aa372e3fSPaul Mullowney matstructT->mat = matrixT; 922aa372e3fSPaul Mullowney 923aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 9242692e278SPaul Mullowney #if CUDA_VERSION>=5000 925aa372e3fSPaul Mullowney /* First convert HYB to CSR */ 926aa372e3fSPaul Mullowney CsrMatrix *temp= new CsrMatrix; 927aa372e3fSPaul Mullowney temp->num_rows = A->rmap->n; 928aa372e3fSPaul Mullowney temp->num_cols = A->cmap->n; 929aa372e3fSPaul Mullowney temp->num_entries = a->nz; 930aa372e3fSPaul Mullowney temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 931aa372e3fSPaul Mullowney temp->column_indices = new THRUSTINTARRAY32(a->nz); 932aa372e3fSPaul Mullowney temp->values = new THRUSTARRAY(a->nz); 933aa372e3fSPaul Mullowney 9342692e278SPaul Mullowney 935aa372e3fSPaul Mullowney stat = cusparse_hyb2csr(cusparsestruct->handle, 936aa372e3fSPaul Mullowney matstruct->descr, (cusparseHybMat_t)matstruct->mat, 937aa372e3fSPaul Mullowney temp->values->data().get(), 938aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 939aa372e3fSPaul Mullowney temp->column_indices->data().get());CHKERRCUSP(stat); 940aa372e3fSPaul Mullowney 941aa372e3fSPaul 
Mullowney /* Next, convert CSR to CSC (i.e. the matrix transpose) */ 942aa372e3fSPaul Mullowney CsrMatrix *tempT= new CsrMatrix; 943aa372e3fSPaul Mullowney tempT->num_rows = A->rmap->n; 944aa372e3fSPaul Mullowney tempT->num_cols = A->cmap->n; 945aa372e3fSPaul Mullowney tempT->num_entries = a->nz; 946aa372e3fSPaul Mullowney tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 947aa372e3fSPaul Mullowney tempT->column_indices = new THRUSTINTARRAY32(a->nz); 948aa372e3fSPaul Mullowney tempT->values = new THRUSTARRAY(a->nz); 949aa372e3fSPaul Mullowney 950aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, 951aa372e3fSPaul Mullowney temp->num_cols, temp->num_entries, 952aa372e3fSPaul Mullowney temp->values->data().get(), 953aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 954aa372e3fSPaul Mullowney temp->column_indices->data().get(), 955aa372e3fSPaul Mullowney tempT->values->data().get(), 956aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 957aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 958aa372e3fSPaul Mullowney CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUSP(stat); 959aa372e3fSPaul Mullowney 960aa372e3fSPaul Mullowney /* Last, convert CSC to HYB */ 961aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 962aa372e3fSPaul Mullowney stat = cusparseCreateHybMat(&hybMat);CHKERRCUSP(stat); 963aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
964aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 965aa372e3fSPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, 966aa372e3fSPaul Mullowney matstructT->descr, tempT->values->data().get(), 967aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 968aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 969aa372e3fSPaul Mullowney hybMat, 0, partition);CHKERRCUSP(stat); 970aa372e3fSPaul Mullowney 971aa372e3fSPaul Mullowney /* assign the pointer */ 972aa372e3fSPaul Mullowney matstructT->mat = hybMat; 973aa372e3fSPaul Mullowney 974aa372e3fSPaul Mullowney /* delete temporaries */ 975aa372e3fSPaul Mullowney if (tempT) { 976aa372e3fSPaul Mullowney if (tempT->values) delete (THRUSTARRAY*) tempT->values; 977aa372e3fSPaul Mullowney if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; 978aa372e3fSPaul Mullowney if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; 979aa372e3fSPaul Mullowney delete (CsrMatrix*) tempT; 980087f3262SPaul Mullowney } 981aa372e3fSPaul Mullowney if (temp) { 982aa372e3fSPaul Mullowney if (temp->values) delete (THRUSTARRAY*) temp->values; 983aa372e3fSPaul Mullowney if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; 984aa372e3fSPaul Mullowney if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; 985aa372e3fSPaul Mullowney delete (CsrMatrix*) temp; 986aa372e3fSPaul Mullowney } 9872692e278SPaul Mullowney #else 9882692e278SPaul Mullowney SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later."); 9892692e278SPaul Mullowney #endif 990aa372e3fSPaul Mullowney } 991aa372e3fSPaul Mullowney /* assign the compressed row indices */ 992aa372e3fSPaul Mullowney matstructT->cprowIndices = new THRUSTINTARRAY; 993aa372e3fSPaul Mullowney 994aa372e3fSPaul Mullowney /* assign the pointer */ 995aa372e3fSPaul 
((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;  /* publish the transpose multiply structure */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE"
/* Triangular solve with the transposed factors, honoring the row and column
   permutations recorded at factorization time. The transposed factor
   structures are built lazily on first use. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  CUSPARRAY                         *xGPU, *bGPU;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Create the transposed triangular factors on the fly if not cached yet */
  if (!loTriFactorT && !upTriFactorT) {
    ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Device views of the right-hand side and solution vectors */
  ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr);

  /* Reorder b with the row permutation into x */
  thrust::copy(thrust::make_permutation_iterator(bGPU->begin(), cusparseTriFactors->rpermIndices->begin()),
               thrust::make_permutation_iterator(bGPU->end(),   cusparseTriFactors->rpermIndices->end()),
               xGPU->begin());

  /* Sweep with the (transposed) upper factor first: x -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
                        upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr,
                        upTriFactorT->csrMat->values->data().get(),
                        upTriFactorT->csrMat->row_offsets->data().get(),
                        upTriFactorT->csrMat->column_indices->data().get(),
                        upTriFactorT->solveInfo,
                        xGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat);

  /* Then the (transposed) lower factor: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
                        loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr,
                        loTriFactorT->csrMat->values->data().get(),
                        loTriFactorT->csrMat->row_offsets->data().get(),
                        loTriFactorT->csrMat->column_indices->data().get(),
                        loTriFactorT->solveInfo,
                        tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat);

  /* Apply the column permutation into the scratch vector (cannot permute in place) */
  thrust::copy(thrust::make_permutation_iterator(xGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
               thrust::make_permutation_iterator(xGPU->end(),   cusparseTriFactors->cpermIndices->end()),
               tempGPU->begin());

  /* Move the permuted result back into the solution vector */
  thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU->begin());

  /* restore the device views */
  ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = WaitForGPU();CHKERRCUSP(ierr);

  ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering"
/* Same as MatSolveTranspose_SeqAIJCUSPARSE but assumes natural ordering,
   so no row/column permutations are applied. */
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  CUSPARRAY                         *xGPU,*bGPU;
  cusparseStatus_t                  stat;
Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Build the transposed triangular factors on the fly if not cached yet */
  if (!loTriFactorT && !upTriFactorT) {
    ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr);
    loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
    upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  }

  /* Device views of the right-hand side and solution vectors */
  ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr);

  /* No permutation in the natural-ordering path: sweep upper factor, b -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp,
                        upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr,
                        upTriFactorT->csrMat->values->data().get(),
                        upTriFactorT->csrMat->row_offsets->data().get(),
                        upTriFactorT->csrMat->column_indices->data().get(),
                        upTriFactorT->solveInfo,
                        bGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat);

  /* Then the lower factor: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp,
                        loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr,
                        loTriFactorT->csrMat->values->data().get(),
                        loTriFactorT->csrMat->row_offsets->data().get(),
                        loTriFactorT->csrMat->column_indices->data().get(),
                        loTriFactorT->solveInfo,
                        tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat);

  /* restore the device views */
  ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = WaitForGPU();CHKERRCUSP(ierr);
  ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolve_SeqAIJCUSPARSE"
/* Forward/backward triangular solve with the cached GPU factors, applying the
   row permutation on entry and the column permutation on exit. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  CUSPARRAY                         *xGPU,*bGPU;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;
  VecType                           t;
  PetscBool                         flg;

  PetscFunctionBegin;
  /* Both vectors must live on the GPU (VECSEQCUSP) for the device solve */
  ierr = VecGetType(bb,&t);CHKERRQ(ierr);
  ierr = PetscStrcmp(t,VECSEQCUSP,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUSP);
  ierr = VecGetType(xx,&t);CHKERRQ(ierr);
  ierr = PetscStrcmp(t,VECSEQCUSP,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). Can only deal with %s\n.",t,VECSEQCUSP);

  /* Device views of the right-hand side and solution vectors */
  ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr);

  /* Reorder b with the row permutation into x */
  thrust::copy(thrust::make_permutation_iterator(bGPU->begin(), cusparseTriFactors->rpermIndices->begin()),
               thrust::make_permutation_iterator(bGPU->end(),   cusparseTriFactors->rpermIndices->end()),
               xGPU->begin());

  /* Forward sweep with the lower factor: x -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
                        loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        xGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat);

  /* Backward sweep with the upper factor: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,
                        tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat);

  /* Apply the column permutation into the scratch vector (cannot permute in place) */
  thrust::copy(thrust::make_permutation_iterator(xGPU->begin(), cusparseTriFactors->cpermIndices->begin()),
               thrust::make_permutation_iterator(xGPU->end(),   cusparseTriFactors->cpermIndices->end()),
               tempGPU->begin());

  /* Move the permuted result back into the solution vector */
  thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU->begin());

  ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = WaitForGPU();CHKERRCUSP(ierr);
  ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering"
/* Same as MatSolve_SeqAIJCUSPARSE but assumes natural ordering, so no
   row/column permutations (and no vector-type guard) are applied. */
static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx)
{
  CUSPARRAY                         *xGPU,*bGPU;
  cusparseStatus_t                  stat;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  THRUSTARRAY                       *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  /* Device views of the right-hand side and solution vectors */
  ierr = VecCUSPGetArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayRead(bb,&bGPU);CHKERRQ(ierr);

  /* Forward sweep with the lower factor: b -> tempGPU */
  stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp,
loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr,
                        loTriFactor->csrMat->values->data().get(),
                        loTriFactor->csrMat->row_offsets->data().get(),
                        loTriFactor->csrMat->column_indices->data().get(),
                        loTriFactor->solveInfo,
                        bGPU->data().get(), tempGPU->data().get());CHKERRCUSP(stat);

  /* Backward sweep with the upper factor: tempGPU -> x */
  stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp,
                        upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr,
                        upTriFactor->csrMat->values->data().get(),
                        upTriFactor->csrMat->row_offsets->data().get(),
                        upTriFactor->csrMat->column_indices->data().get(),
                        upTriFactor->solveInfo,
                        tempGPU->data().get(), xGPU->data().get());CHKERRCUSP(stat);

  ierr = VecCUSPRestoreArrayRead(bb,&bGPU);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(xx,&xGPU);CHKERRQ(ierr);
  ierr = WaitForGPU();CHKERRCUSP(ierr);
  ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU"
/* Mirrors the host AIJ data onto the GPU in the storage format selected in
   Mat_SeqAIJCUSPARSE->format (CSR, ELL, or HYB). Always builds a compressed-row
   representation on the device, synthesizing the ii/ridx arrays when the host
   matrix does not use compressed rows. No-op unless the host copy is newer
   (PETSC_CUSP_UNALLOCATED or PETSC_CUSP_CPU). */
static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A)
{
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  PetscInt                     m = A->rmap->n,*ii,*ridx;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;
  cudaError_t                  err;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUSP_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUSP_CPU) {
    ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
    /* fix: the destroy routine returns a PetscErrorCode that was previously dropped */
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format);CHKERRQ(ierr);
    try {
      /* count rows that have at least one nonzero (used for flop logging and sizing) */
      cusparsestruct->nonzerorow=0;
      for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0);

      if (a->compressedrow.use) {
        m    = a->compressedrow.nrows;
        ii   = a->compressedrow.i;
        ridx = a->compressedrow.rindex;
      } else {
        /* Forcing compressed row on the GPU: build ii/ridx from the full row pointers */
        int k=0;
        ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr);
        ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr);
        ii[0]=0;
        for (int j = 0; j<m; j++) {
          if ((a->i[j+1]-a->i[j])>0) {
            ii[k]  = a->i[j];
            ridx[k]= j;
            k++;
          }
        }
        ii[cusparsestruct->nonzerorow] = a->nz;
        m = cusparsestruct->nonzerorow;
      }

      /* allocate the multiply structure and its cusparse matrix descriptor */
      matstruct = new Mat_SeqAIJCUSPARSEMultStruct;
      stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUSP(stat);
      stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUSP(stat);
      stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUSP(stat);

      /* device-resident alpha/beta scalars so cusparse can run in device pointer mode */
      err  = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUSP(err);
      err  = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err);
      err  = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUSP(err);
      err  = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUSP(err);
      stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUSP(stat);

      /* Build a CSR matrix, or a hybrid/ellpack matrix if that storage was chosen */
      if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
        CsrMatrix *matrix= new CsrMatrix;
        matrix->num_rows       = m;
        matrix->num_cols       = A->cmap->n;
        matrix->num_entries    = a->nz;
        matrix->row_offsets    = new THRUSTINTARRAY32(m+1);
        matrix->row_offsets->assign(ii, ii + m+1);

        matrix->column_indices = new THRUSTINTARRAY32(a->nz);
        matrix->column_indices->assign(a->j, a->j+a->nz);

        matrix->values         = new THRUSTARRAY(a->nz);
        matrix->values->assign(a->a, a->a+a->nz);

        /* assign the pointer */
        matstruct->mat = matrix;

      } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if CUDA_VERSION>=4020
        /* stage a temporary CSR copy on the device, then convert to HYB/ELL */
        CsrMatrix *matrix= new CsrMatrix;
        matrix->num_rows       = m;
        matrix->num_cols       = A->cmap->n;
        matrix->num_entries    = a->nz;
        matrix->row_offsets    = new THRUSTINTARRAY32(m+1);
        matrix->row_offsets->assign(ii, ii + m+1);

        matrix->column_indices = new THRUSTINTARRAY32(a->nz);
        matrix->column_indices->assign(a->j, a->j+a->nz);

        matrix->values         = new THRUSTARRAY(a->nz);
        matrix->values->assign(a->a, a->a+a->nz);

        cusparseHybMat_t hybMat;
        stat = cusparseCreateHybMat(&hybMat);CHKERRCUSP(stat);
        cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
          CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
        stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols,
                                matstruct->descr, matrix->values->data().get(),
                                matrix->row_offsets->data().get(),
                                matrix->column_indices->data().get(),
                                hybMat, 0, partition);CHKERRCUSP(stat);
        /* assign the pointer */
        matstruct->mat = hybMat;

        /* the staging CSR copy is no longer needed */
        if (matrix) {
          if (matrix->values) delete (THRUSTARRAY*)matrix->values;
          if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices;
          if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets;
          delete (CsrMatrix*)matrix;
        }
#else
        /* fix: previously fell through silently, leaving matstruct->mat unset;
           error out like the matching transpose code path does */
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage formats require CUDA 4.2 or later.");
#endif
      }

      /* assign the compressed row indices */
      matstruct->cprowIndices = new THRUSTINTARRAY(m);
      matstruct->cprowIndices->assign(ridx,ridx+m);

      /* assign the pointer */
      cusparsestruct->mat = matstruct;

      if (!a->compressedrow.use) {
        /* ii/ridx were synthesized above; release them */
        ierr = PetscFree(ii);CHKERRQ(ierr);
        ierr = PetscFree(ridx);CHKERRQ(ierr);
      }
      cusparsestruct->workVector = new THRUSTARRAY;
      cusparsestruct->workVector->resize(m);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
    ierr = WaitForGPU();CHKERRCUSP(ierr);

    A->valid_GPU_matrix = PETSC_CUSP_BOTH;

    ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE"
/* Create GPU (VECSEQCUSP) vectors compatible with this matrix: *right is sized
   by the column layout, *left by the row layout; either may be NULL. */
static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left)
{
  PetscErrorCode ierr;
  PetscInt       rbs,cbs;

  PetscFunctionBegin;
  ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr);
  if (right) {
    ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
    ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr);
    ierr = VecSetType(*right,VECSEQCUSP);CHKERRQ(ierr);
    ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr);
  }
  if (left) {
    ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
    ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr);
    ierr = VecSetType(*left,VECSEQCUSP);CHKERRQ(ierr);
    ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr);
  }
PetscFunctionReturn(0);
}

/* Functor for a fused y += x over zipped iterators (used by the multiply-add
   scatter paths). */
struct VecCUSPPlusEquals
{
  template <typename Tuple>
  __host__ __device__
  void operator()(Tuple t)
  {
    thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t);
  }
};

#undef __FUNCT__
#define __FUNCT__ "MatMult_SeqAIJCUSPARSE"
/* y = A*x using the GPU copy of the matrix, dispatching on the chosen storage
   format (CSR vs. ELL/HYB). */
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  CUSPARRAY                    *xarray,*yarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE
     ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */
  ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayWrite(yy,&yarray);CHKERRQ(ierr);
  if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
    CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
    stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             mat->num_rows, mat->num_cols, mat->num_entries,
                             matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(),
                             mat->column_indices->data().get(), xarray->data().get(), matstruct->beta,
                             yarray->data().get());CHKERRCUSP(stat);
  } else {
#if CUDA_VERSION>=4020
    cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
    stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             matstruct->alpha, matstruct->descr, hybMat,
                             xarray->data().get(), matstruct->beta,
                             yarray->data().get());CHKERRCUSP(stat);
#endif
  }
  ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(yy,&yarray);CHKERRQ(ierr);
  /* when a user stream is set, the caller owns synchronization */
  if (!cusparsestruct->stream) {
    ierr = WaitForGPU();CHKERRCUSP(ierr);
  }
  ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE"
/* y = A^T*x using an explicitly stored transpose on the GPU, which is
   generated lazily on first use. */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  CUSPARRAY                    *xarray,*yarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE
     ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */
  if (!matstructT) {
    ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
    matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  }
  ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUSPGetArrayWrite(yy,&yarray);CHKERRQ(ierr);

  /* a NON_TRANSPOSE spmv with the stored transpose computes A^T x */
  if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
    CsrMatrix *mat = (CsrMatrix*)matstructT->mat;
    stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             mat->num_rows, mat->num_cols,
                             mat->num_entries, matstructT->alpha, matstructT->descr,
                             mat->values->data().get(), mat->row_offsets->data().get(),
                             mat->column_indices->data().get(), xarray->data().get(), matstructT->beta,
                             yarray->data().get());CHKERRCUSP(stat);
  } else {
#if CUDA_VERSION>=4020
    cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat;
    stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             matstructT->alpha, matstructT->descr, hybMat,
                             xarray->data().get(), matstructT->beta,
                             yarray->data().get());CHKERRCUSP(stat);
#endif
  }
  ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUSPRestoreArrayWrite(yy,&yarray);CHKERRQ(ierr);
  /* when a user stream is set, the caller owns synchronization */
  if (!cusparsestruct->stream) {
    ierr = WaitForGPU();CHKERRCUSP(ierr);
  }
  ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}


#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE"
/* z = y + A*x on the GPU (continues past this chunk). */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  CUSPARRAY                    *xarray,*yarray,*zarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE
     ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */
  try {
    ierr = VecCopy_SeqCUSP(yy,zz);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUSPGetArrayRead(yy,&yarray);CHKERRQ(ierr);
    ierr =
VecCUSPGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 14979ae82921SPaul Mullowney 1498e057df02SPaul Mullowney /* multiply add */ 1499aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1500aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1501b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1502b06137fdSPaul Mullowney number of compressed rows in the matrix ... which is equivalent to the 1503b06137fdSPaul Mullowney size of the workVector */ 1504aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1505a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1506b06137fdSPaul Mullowney mat->num_entries, matstruct->alpha, matstruct->descr, 1507aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1508b06137fdSPaul Mullowney mat->column_indices->data().get(), xarray->data().get(), matstruct->beta, 1509aa372e3fSPaul Mullowney cusparsestruct->workVector->data().get());CHKERRCUSP(stat); 1510aa372e3fSPaul Mullowney } else { 15112692e278SPaul Mullowney #if CUDA_VERSION>=4020 1512aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1513a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1514aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1515b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1516b06137fdSPaul Mullowney xarray->data().get(), matstruct->beta, 1517aa372e3fSPaul Mullowney cusparsestruct->workVector->data().get());CHKERRCUSP(stat); 1518a65300a6SPaul Mullowney } 15192692e278SPaul Mullowney #endif 1520aa372e3fSPaul Mullowney } 1521aa372e3fSPaul Mullowney 1522aa372e3fSPaul Mullowney /* scatter the data from the temporary into the full vector with a += operation */ 1523aa372e3fSPaul Mullowney 
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstruct->cprowIndices->begin()))), 1524aa372e3fSPaul Mullowney thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1525aa372e3fSPaul Mullowney VecCUSPPlusEquals()); 15269ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 15279ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayRead(yy,&yarray);CHKERRQ(ierr); 15289ae82921SPaul Mullowney ierr = VecCUSPRestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 15299ae82921SPaul Mullowney 15309ae82921SPaul Mullowney } catch(char *ex) { 15319ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 15329ae82921SPaul Mullowney } 15339ae82921SPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 15349ae82921SPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 15359ae82921SPaul Mullowney PetscFunctionReturn(0); 15369ae82921SPaul Mullowney } 15379ae82921SPaul Mullowney 15389ae82921SPaul Mullowney #undef __FUNCT__ 1539b175d8bbSPaul Mullowney #define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE" 15406fa9248bSJed Brown static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) 1541ca45077fSPaul Mullowney { 1542ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1543aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1544aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1545ca45077fSPaul Mullowney CUSPARRAY *xarray,*yarray,*zarray; 1546b175d8bbSPaul Mullowney PetscErrorCode ierr; 1547aa372e3fSPaul Mullowney cusparseStatus_t stat; 15486e111a19SKarl Rupp 1549ca45077fSPaul Mullowney PetscFunctionBegin; 1550e057df02SPaul 
Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1551e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1552aa372e3fSPaul Mullowney if (!matstructT) { 1553bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1554aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1555bda325fcSPaul Mullowney } 1556aa372e3fSPaul Mullowney 1557ca45077fSPaul Mullowney try { 1558ca45077fSPaul Mullowney ierr = VecCopy_SeqCUSP(yy,zz);CHKERRQ(ierr); 1559ca45077fSPaul Mullowney ierr = VecCUSPGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1560ca45077fSPaul Mullowney ierr = VecCUSPGetArrayRead(yy,&yarray);CHKERRQ(ierr); 1561ca45077fSPaul Mullowney ierr = VecCUSPGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 1562ca45077fSPaul Mullowney 1563e057df02SPaul Mullowney /* multiply add with matrix transpose */ 1564aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1565aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1566b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1567b06137fdSPaul Mullowney number of compressed rows in the matrix ... 
which is equivalent to the 1568b06137fdSPaul Mullowney size of the workVector */ 1569aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1570a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1571b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1572aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1573b06137fdSPaul Mullowney mat->column_indices->data().get(), xarray->data().get(), matstructT->beta, 1574aa372e3fSPaul Mullowney cusparsestruct->workVector->data().get());CHKERRCUSP(stat); 1575aa372e3fSPaul Mullowney } else { 15762692e278SPaul Mullowney #if CUDA_VERSION>=4020 1577aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1578a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1579aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1580b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, hybMat, 1581b06137fdSPaul Mullowney xarray->data().get(), matstructT->beta, 1582aa372e3fSPaul Mullowney cusparsestruct->workVector->data().get());CHKERRCUSP(stat); 1583a65300a6SPaul Mullowney } 15842692e278SPaul Mullowney #endif 1585aa372e3fSPaul Mullowney } 1586aa372e3fSPaul Mullowney 1587aa372e3fSPaul Mullowney /* scatter the data from the temporary into the full vector with a += operation */ 1588aa372e3fSPaul Mullowney thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstructT->cprowIndices->begin()))), 1589aa372e3fSPaul Mullowney thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zarray->begin(), matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1590aa372e3fSPaul Mullowney VecCUSPPlusEquals()); 1591ca45077fSPaul Mullowney 1592ca45077fSPaul Mullowney ierr = 
VecCUSPRestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1593ca45077fSPaul Mullowney ierr = VecCUSPRestoreArrayRead(yy,&yarray);CHKERRQ(ierr); 1594ca45077fSPaul Mullowney ierr = VecCUSPRestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 1595ca45077fSPaul Mullowney 1596ca45077fSPaul Mullowney } catch(char *ex) { 1597ca45077fSPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 1598ca45077fSPaul Mullowney } 1599ca45077fSPaul Mullowney ierr = WaitForGPU();CHKERRCUSP(ierr); 1600ca45077fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 1601ca45077fSPaul Mullowney PetscFunctionReturn(0); 1602ca45077fSPaul Mullowney } 1603ca45077fSPaul Mullowney 1604ca45077fSPaul Mullowney #undef __FUNCT__ 16059ae82921SPaul Mullowney #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" 16066fa9248bSJed Brown static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) 16079ae82921SPaul Mullowney { 16089ae82921SPaul Mullowney PetscErrorCode ierr; 16096e111a19SKarl Rupp 16109ae82921SPaul Mullowney PetscFunctionBegin; 16119ae82921SPaul Mullowney ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); 1612bc3f50f2SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 1613e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); 1614bc3f50f2SPaul Mullowney } 16159ae82921SPaul Mullowney if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); 1616bbf3fe20SPaul Mullowney A->ops->mult = MatMult_SeqAIJCUSPARSE; 1617bbf3fe20SPaul Mullowney A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; 1618bbf3fe20SPaul Mullowney A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; 1619bbf3fe20SPaul Mullowney A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; 16209ae82921SPaul Mullowney PetscFunctionReturn(0); 16219ae82921SPaul Mullowney } 16229ae82921SPaul Mullowney 16239ae82921SPaul Mullowney /* --------------------------------------------------------------------------------*/ 16249ae82921SPaul Mullowney #undef __FUNCT__ 16259ae82921SPaul 
#define __FUNCT__ "MatCreateSeqAIJCUSPARSE"
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz). By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE"
/* Releases the GPU-side data (mult structures for ordinary matrices, triangular
   factor structures for factored matrices) and then destroys the CPU AIJ part. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype==MAT_FACTOR_NONE) {
    /* nothing was ever allocated on the GPU if the matrix is still PETSC_CUSP_UNALLOCATED */
    if (A->valid_GPU_matrix != PETSC_CUSP_UNALLOCATED) {
      ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
    }
  } else {
    ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCUSPARSE"
/* Type constructor: builds the CPU AIJ matrix, attaches the GPU-side containers
   (a Mat_SeqAIJCUSPARSE for ordinary matrices, a Mat_SeqAIJCUSPARSETriFactors
   for factored ones), creates a cusparse handle, and installs the CUSPARSE ops. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle=0;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  if (B->factortype==MAT_FACTOR_NONE) {
    /* you cannot check the inode.use flag here since the matrix was just created.
       now build a GPU matrix data structure */
    Mat_SeqAIJCUSPARSE *cusparsestruct = new Mat_SeqAIJCUSPARSE;
    B->spptr = cusparsestruct;

    cusparsestruct->mat          = 0;
    cusparsestruct->matTranspose = 0;
    cusparsestruct->workVector   = 0;
    cusparsestruct->format       = MAT_CUSPARSE_CSR;
    cusparsestruct->stream       = 0;
    cusparsestruct->handle       = 0;
    stat = cusparseCreate(&handle);CHKERRCUSP(stat);
    cusparsestruct->handle = handle;
  } else {
    /* NEXT, set the pointers to the triangular factors */
    Mat_SeqAIJCUSPARSETriFactors *trifactors = new Mat_SeqAIJCUSPARSETriFactors;
    B->spptr = trifactors;

    trifactors->loTriFactorPtr          = 0;
    trifactors->upTriFactorPtr          = 0;
    trifactors->loTriFactorPtrTranspose = 0;
    trifactors->upTriFactorPtrTranspose = 0;
    trifactors->rpermIndices            = 0;
    trifactors->cpermIndices            = 0;
    trifactors->workVector              = 0;
    trifactors->handle                  = 0;
    stat = cusparseCreate(&handle);CHKERRCUSP(stat);
    trifactors->handle = handle;
    trifactors->nnz    = 0;
  }

  B->ops->assemblyend      = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy          = MatDestroy_SeqAIJCUSPARSE;
  B->ops->getvecs          = MatCreateVecs_SeqAIJCUSPARSE;
  B->ops->setfromoptions   = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->mult             = MatMult_SeqAIJCUSPARSE;
  B->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
  B->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
  B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;

  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  B->valid_GPU_matrix = PETSC_CUSP_UNALLOCATED;

  ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*M
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
-  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).

   Level: beginner

.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*);

#undef __FUNCT__
#define __FUNCT__ "MatSolverPackageRegister_CUSPARSE"
/* Registers the CUSPARSE factorization backend for LU, Cholesky, ILU and ICC on seqaijcusparse matrices. */
PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy"
/* Frees the GPU mult structures (matrix and transpose), the work vector and the
   cusparse handle, then deletes the container and zeroes the caller's pointer.
   Safe to call with *cusparsestruct == NULL. */
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle;

  PetscFunctionBegin;
  if (*cusparsestruct) {
    /* propagate errors from the helper destroys instead of silently dropping them */
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    handle = (*cusparsestruct)->handle;
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUSP(stat);
    }
    delete *cusparsestruct;
    *cusparsestruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "CsrMatrix_Destroy"
/* Frees the three device arrays of a CsrMatrix, the struct itself, and zeroes the
   caller's pointer. Safe to call with *mat == NULL. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy"
/* Frees one triangular-factor structure: its matrix descriptor, solve-analysis
   info, CSR storage, and the struct itself. Safe to call with *trifactor == NULL. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr)     { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUSP(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUSP(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    delete *trifactor;
    *trifactor = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy"
/* Frees one mult structure, dispatching on the storage format: HYB/ELL data is
   released through cusparseDestroyHybMat(), CSR data through CsrMatrix_Destroy().
   Also frees the descriptor, the compressed-row index vector, and the device-side
   alpha/beta scalars. Safe to call with *matstruct == NULL. */
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cudaError_t      err;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUSP(stat);
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        /* check the return code rather than discarding it */
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr);
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUSP(stat); }
    delete (*matstruct)->cprowIndices;
    if ((*matstruct)->alpha) { err = cudaFree((*matstruct)->alpha);CHKERRCUSP(err); }
    if ((*matstruct)->beta)  { err = cudaFree((*matstruct)->beta);CHKERRCUSP(err); }
    delete *matstruct;
    *matstruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactors_Destroy"
/* Frees all four triangular-factor structures, the permutation index vectors, the
   work vector and the cusparse handle, then deletes the container and zeroes the
   caller's pointer. Safe to call with *trifactors == NULL. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  PetscErrorCode   ierr;
  cusparseHandle_t handle;
  cusparseStatus_t stat;

  PetscFunctionBegin;
  if (*trifactors) {
    /* propagate errors from the helper destroys instead of silently dropping them */
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    handle = (*trifactors)->handle;
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUSP(stat);
    }
    delete *trifactors;
    *trifactors = 0;
  }
  PetscFunctionReturn(0);
}
