19ae82921SPaul Mullowney /* 29ae82921SPaul Mullowney Defines the basic matrix operations for the AIJ (compressed row) 3bc3f50f2SPaul Mullowney matrix storage format using the CUSPARSE library, 49ae82921SPaul Mullowney */ 5dced61a5SBarry Smith #define PETSC_SKIP_SPINLOCK 69ae82921SPaul Mullowney 73d13b8fdSMatthew G. Knepley #include <petscconf.h> 83d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 9087f3262SPaul Mullowney #include <../src/mat/impls/sbaij/seq/sbaij.h> 103d13b8fdSMatthew G. Knepley #include <../src/vec/vec/impls/dvecimpl.h> 11af0996ceSBarry Smith #include <petsc/private/vecimpl.h> 129ae82921SPaul Mullowney #undef VecType 133d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> 14bc3f50f2SPaul Mullowney 15e057df02SPaul Mullowney const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; 169ae82921SPaul Mullowney 17087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 18087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 19087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 20087f3262SPaul Mullowney 216fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 226fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 236fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 24087f3262SPaul Mullowney 256fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); 266fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); 276fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); 286fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); 294416b707SBarry Smith static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); 306fa9248bSJed Brown static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); 316fa9248bSJed Brown static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); 326fa9248bSJed Brown static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); 336fa9248bSJed Brown static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); 349ae82921SPaul Mullowney 357f756511SDominic Meiser static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); 367f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**); 377f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat); 387f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**); 397f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**); 407f756511SDominic Meiser 419ae82921SPaul Mullowney #undef __FUNCT__ 42b06137fdSPaul Mullowney #define __FUNCT__ "MatCUSPARSESetStream" 43b06137fdSPaul Mullowney PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream) 44b06137fdSPaul Mullowney { 45b06137fdSPaul Mullowney cusparseStatus_t stat; 46b06137fdSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 47b06137fdSPaul Mullowney 48b06137fdSPaul Mullowney PetscFunctionBegin; 49b06137fdSPaul Mullowney cusparsestruct->stream = stream; 50c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUDA(stat); 51b06137fdSPaul Mullowney PetscFunctionReturn(0); 52b06137fdSPaul Mullowney } 53b06137fdSPaul Mullowney 54b06137fdSPaul Mullowney #undef __FUNCT__ 55b06137fdSPaul Mullowney #define __FUNCT__ "MatCUSPARSESetHandle" 56b06137fdSPaul Mullowney PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle) 57b06137fdSPaul Mullowney { 58b06137fdSPaul Mullowney cusparseStatus_t stat; 59b06137fdSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 60b06137fdSPaul Mullowney 61b06137fdSPaul Mullowney PetscFunctionBegin; 62*16a2e217SAlejandro Lamas Daviña if (cusparsestruct->handle) { 63c41cb2e2SAlejandro Lamas Daviña stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUDA(stat); 64*16a2e217SAlejandro Lamas Daviña } 65b06137fdSPaul Mullowney cusparsestruct->handle = handle; 66c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); 67b06137fdSPaul Mullowney PetscFunctionReturn(0); 68b06137fdSPaul Mullowney } 69b06137fdSPaul Mullowney 70b06137fdSPaul Mullowney #undef __FUNCT__ 71b06137fdSPaul Mullowney #define __FUNCT__ "MatCUSPARSEClearHandle" 72b06137fdSPaul Mullowney PetscErrorCode MatCUSPARSEClearHandle(Mat A) 73b06137fdSPaul Mullowney { 74b06137fdSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 75b06137fdSPaul Mullowney PetscFunctionBegin; 76b06137fdSPaul Mullowney if (cusparsestruct->handle) 77b06137fdSPaul Mullowney cusparsestruct->handle = 0; 78b06137fdSPaul Mullowney PetscFunctionReturn(0); 79b06137fdSPaul Mullowney } 80b06137fdSPaul Mullowney 81b06137fdSPaul Mullowney #undef __FUNCT__ 829ae82921SPaul Mullowney #define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse" 839ae82921SPaul Mullowney PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type) 849ae82921SPaul Mullowney { 859ae82921SPaul Mullowney PetscFunctionBegin; 869ae82921SPaul Mullowney *type = MATSOLVERCUSPARSE; 879ae82921SPaul Mullowney PetscFunctionReturn(0); 889ae82921SPaul Mullowney } 899ae82921SPaul Mullowney 90c708e6cdSJed Brown /*MC 91087f3262SPaul Mullowney MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices 92087f3262SPaul Mullowney on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported 93087f3262SPaul Mullowney algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer 94087f3262SPaul Mullowney performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the 95087f3262SPaul Mullowney CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these 96087f3262SPaul Mullowney algorithms are not recommended. This class does NOT support direct solver operations. 97c708e6cdSJed Brown 989ae82921SPaul Mullowney Level: beginner 99c708e6cdSJed Brown 100c708e6cdSJed Brown .seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 101c708e6cdSJed Brown M*/ 1029ae82921SPaul Mullowney 1039ae82921SPaul Mullowney #undef __FUNCT__ 10442c9c57cSBarry Smith #define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse" 10542c9c57cSBarry Smith PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) 1069ae82921SPaul Mullowney { 1079ae82921SPaul Mullowney PetscErrorCode ierr; 108bc3f50f2SPaul Mullowney PetscInt n = A->rmap->n; 1099ae82921SPaul Mullowney 1109ae82921SPaul Mullowney PetscFunctionBegin; 111bc3f50f2SPaul Mullowney ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); 112404133a2SPaul Mullowney (*B)->factortype = ftype; 113bc3f50f2SPaul Mullowney ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); 1149ae82921SPaul Mullowney ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); 1152205254eSKarl Rupp 116087f3262SPaul Mullowney if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { 11733d57670SJed Brown ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); 1189ae82921SPaul Mullowney (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; 1199ae82921SPaul Mullowney (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; 120087f3262SPaul Mullowney } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { 121087f3262SPaul Mullowney (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; 122087f3262SPaul Mullowney (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; 1239ae82921SPaul Mullowney } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); 124bc3f50f2SPaul Mullowney 125fa03d054SJed Brown ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); 12662a20339SJed Brown ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr); 1279ae82921SPaul Mullowney PetscFunctionReturn(0); 1289ae82921SPaul Mullowney } 1299ae82921SPaul Mullowney 1309ae82921SPaul Mullowney #undef __FUNCT__ 131e057df02SPaul Mullowney #define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE" 132bc3f50f2SPaul Mullowney PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) 133ca45077fSPaul Mullowney { 134aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1356e111a19SKarl Rupp 136ca45077fSPaul Mullowney PetscFunctionBegin; 1372692e278SPaul Mullowney #if CUDA_VERSION>=4020 138ca45077fSPaul Mullowney switch (op) { 139e057df02SPaul Mullowney case MAT_CUSPARSE_MULT: 140aa372e3fSPaul Mullowney cusparsestruct->format = format; 141ca45077fSPaul Mullowney break; 142e057df02SPaul Mullowney case MAT_CUSPARSE_ALL: 143aa372e3fSPaul Mullowney cusparsestruct->format = format; 144ca45077fSPaul Mullowney break; 145ca45077fSPaul Mullowney default: 14636d62e41SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); 147ca45077fSPaul Mullowney } 1482692e278SPaul Mullowney #else 1496c4ed002SBarry Smith if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later."); 1502692e278SPaul Mullowney #endif 151ca45077fSPaul Mullowney PetscFunctionReturn(0); 152ca45077fSPaul Mullowney } 1539ae82921SPaul Mullowney 154e057df02SPaul Mullowney /*@ 155e057df02SPaul Mullowney MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular 156e057df02SPaul Mullowney operation. Only the MatMult operation can use different GPU storage formats 157aa372e3fSPaul Mullowney for MPIAIJCUSPARSE matrices. 158e057df02SPaul Mullowney Not Collective 159e057df02SPaul Mullowney 160e057df02SPaul Mullowney Input Parameters: 1618468deeeSKarl Rupp + A - Matrix of type SEQAIJCUSPARSE 16236d62e41SPaul Mullowney . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. 1632692e278SPaul Mullowney - format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2) 164e057df02SPaul Mullowney 165e057df02SPaul Mullowney Output Parameter: 166e057df02SPaul Mullowney 167e057df02SPaul Mullowney Level: intermediate 168e057df02SPaul Mullowney 1698468deeeSKarl Rupp .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 170e057df02SPaul Mullowney @*/ 171e057df02SPaul Mullowney #undef __FUNCT__ 172e057df02SPaul Mullowney #define __FUNCT__ "MatCUSPARSESetFormat" 173e057df02SPaul Mullowney PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) 174e057df02SPaul Mullowney { 175e057df02SPaul Mullowney PetscErrorCode ierr; 1766e111a19SKarl Rupp 177e057df02SPaul Mullowney PetscFunctionBegin; 178e057df02SPaul Mullowney PetscValidHeaderSpecific(A, MAT_CLASSID,1); 179e057df02SPaul Mullowney ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); 180e057df02SPaul Mullowney PetscFunctionReturn(0); 181e057df02SPaul Mullowney } 182e057df02SPaul Mullowney 1839ae82921SPaul Mullowney #undef __FUNCT__ 1849ae82921SPaul Mullowney #define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE" 1854416b707SBarry Smith static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) 1869ae82921SPaul Mullowney { 1879ae82921SPaul Mullowney PetscErrorCode ierr; 188e057df02SPaul Mullowney MatCUSPARSEStorageFormat format; 1899ae82921SPaul Mullowney PetscBool flg; 190a183c035SDominic Meiser Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1916e111a19SKarl Rupp 1929ae82921SPaul Mullowney PetscFunctionBegin; 193e55864a3SBarry Smith ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); 194e057df02SPaul Mullowney ierr = PetscObjectOptionsBegin((PetscObject)A); 1959ae82921SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 196e057df02SPaul Mullowney ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV", 197a183c035SDominic Meiser "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); 198e057df02SPaul Mullowney if (flg) { 199e057df02SPaul Mullowney ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr); 200045c96e1SPaul Mullowney } 2019ae82921SPaul Mullowney } 2024c87dfd4SPaul Mullowney ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", 203a183c035SDominic Meiser "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); 2044c87dfd4SPaul Mullowney if (flg) { 2054c87dfd4SPaul Mullowney ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); 2064c87dfd4SPaul Mullowney } 2079ae82921SPaul Mullowney ierr = PetscOptionsEnd();CHKERRQ(ierr); 2089ae82921SPaul Mullowney PetscFunctionReturn(0); 2099ae82921SPaul Mullowney 2109ae82921SPaul Mullowney } 2119ae82921SPaul Mullowney 2129ae82921SPaul Mullowney #undef __FUNCT__ 2139ae82921SPaul Mullowney #define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE" 2146fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) 2159ae82921SPaul Mullowney { 2169ae82921SPaul Mullowney PetscErrorCode ierr; 2179ae82921SPaul Mullowney 2189ae82921SPaul Mullowney PetscFunctionBegin; 2199ae82921SPaul Mullowney ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); 2209ae82921SPaul Mullowney B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; 2219ae82921SPaul Mullowney PetscFunctionReturn(0); 2229ae82921SPaul Mullowney } 2239ae82921SPaul Mullowney 2249ae82921SPaul Mullowney #undef __FUNCT__ 2259ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE" 2266fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) 2279ae82921SPaul Mullowney { 2289ae82921SPaul Mullowney PetscErrorCode ierr; 2299ae82921SPaul Mullowney 2309ae82921SPaul Mullowney PetscFunctionBegin; 2319ae82921SPaul Mullowney ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); 2329ae82921SPaul Mullowney B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; 2339ae82921SPaul Mullowney PetscFunctionReturn(0); 2349ae82921SPaul Mullowney } 2359ae82921SPaul Mullowney 2369ae82921SPaul Mullowney #undef __FUNCT__ 237087f3262SPaul Mullowney #define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE" 238087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) 239087f3262SPaul Mullowney { 240087f3262SPaul Mullowney PetscErrorCode ierr; 241087f3262SPaul Mullowney 242087f3262SPaul Mullowney PetscFunctionBegin; 243087f3262SPaul Mullowney ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); 244087f3262SPaul Mullowney B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; 245087f3262SPaul Mullowney PetscFunctionReturn(0); 246087f3262SPaul Mullowney } 247087f3262SPaul Mullowney 248087f3262SPaul Mullowney #undef __FUNCT__ 249087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE" 250087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) 251087f3262SPaul Mullowney { 252087f3262SPaul Mullowney PetscErrorCode ierr; 253087f3262SPaul Mullowney 254087f3262SPaul Mullowney PetscFunctionBegin; 255087f3262SPaul Mullowney ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); 256087f3262SPaul Mullowney B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; 257087f3262SPaul Mullowney PetscFunctionReturn(0); 258087f3262SPaul Mullowney } 259087f3262SPaul Mullowney 260087f3262SPaul Mullowney #undef __FUNCT__ 261087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix" 262087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) 2639ae82921SPaul Mullowney { 2649ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 2659ae82921SPaul Mullowney PetscInt n = A->rmap->n; 2669ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 267aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 2689ae82921SPaul Mullowney cusparseStatus_t stat; 2699ae82921SPaul Mullowney const PetscInt *ai = a->i,*aj = a->j,*vi; 2709ae82921SPaul Mullowney const MatScalar *aa = a->a,*v; 2719ae82921SPaul Mullowney PetscInt *AiLo, *AjLo; 2729ae82921SPaul Mullowney PetscScalar *AALo; 2739ae82921SPaul Mullowney PetscInt i,nz, nzLower, offset, rowOffset; 274b175d8bbSPaul Mullowney PetscErrorCode ierr; 2759ae82921SPaul Mullowney 2769ae82921SPaul Mullowney PetscFunctionBegin; 277c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 2789ae82921SPaul Mullowney try { 2799ae82921SPaul Mullowney /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ 2809ae82921SPaul Mullowney nzLower=n+ai[n]-ai[1]; 2819ae82921SPaul Mullowney 2829ae82921SPaul Mullowney /* Allocate Space for the lower triangular matrix */ 283c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); 284c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(ierr); 285c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(ierr); 2869ae82921SPaul Mullowney 2879ae82921SPaul Mullowney /* Fill the lower triangular matrix */ 2889ae82921SPaul Mullowney AiLo[0] = (PetscInt) 0; 2899ae82921SPaul Mullowney AiLo[n] = nzLower; 2909ae82921SPaul Mullowney AjLo[0] = (PetscInt) 0; 2919ae82921SPaul Mullowney AALo[0] = (MatScalar) 1.0; 2929ae82921SPaul Mullowney v = aa; 2939ae82921SPaul Mullowney vi = aj; 2949ae82921SPaul Mullowney offset = 1; 2959ae82921SPaul Mullowney rowOffset= 1; 2969ae82921SPaul Mullowney for (i=1; i<n; i++) { 2979ae82921SPaul Mullowney nz = ai[i+1] - ai[i]; 298e057df02SPaul Mullowney /* additional 1 for the term on the diagonal */ 2999ae82921SPaul Mullowney AiLo[i] = rowOffset; 3009ae82921SPaul Mullowney rowOffset += nz+1; 3019ae82921SPaul Mullowney 3029ae82921SPaul Mullowney ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); 3039ae82921SPaul Mullowney ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 3049ae82921SPaul Mullowney 3059ae82921SPaul Mullowney offset += nz; 3069ae82921SPaul Mullowney AjLo[offset] = (PetscInt) i; 3079ae82921SPaul Mullowney AALo[offset] = (MatScalar) 1.0; 3089ae82921SPaul Mullowney offset += 1; 3099ae82921SPaul Mullowney 3109ae82921SPaul Mullowney v += nz; 3119ae82921SPaul Mullowney vi += nz; 3129ae82921SPaul Mullowney } 3132205254eSKarl Rupp 314aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 315aa372e3fSPaul Mullowney loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 3162205254eSKarl Rupp 317aa372e3fSPaul Mullowney /* Create the matrix description */ 318c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); 319c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 320c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 321c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUDA(stat); 322c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); 323aa372e3fSPaul Mullowney 324aa372e3fSPaul Mullowney /* Create the solve analysis information */ 325c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); 326aa372e3fSPaul Mullowney 327aa372e3fSPaul Mullowney /* set the operation */ 328aa372e3fSPaul Mullowney loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 329aa372e3fSPaul Mullowney 330aa372e3fSPaul Mullowney /* set the matrix */ 331aa372e3fSPaul Mullowney loTriFactor->csrMat = new CsrMatrix; 332aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows = n; 333aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols = n; 334aa372e3fSPaul Mullowney loTriFactor->csrMat->num_entries = nzLower; 335aa372e3fSPaul Mullowney 336aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); 337aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1); 338aa372e3fSPaul Mullowney 339aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); 340aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower); 341aa372e3fSPaul Mullowney 342aa372e3fSPaul Mullowney loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); 343aa372e3fSPaul Mullowney loTriFactor->csrMat->values->assign(AALo, AALo+nzLower); 344aa372e3fSPaul Mullowney 345aa372e3fSPaul Mullowney /* perform the solve analysis */ 346aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, 347aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, 348aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), 349c41cb2e2SAlejandro Lamas Daviña loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); 350aa372e3fSPaul Mullowney 351aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 352aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; 3532205254eSKarl Rupp 354c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AiLo);CHKERRCUDA(ierr); 355c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AjLo);CHKERRCUDA(ierr); 356c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr); 3579ae82921SPaul Mullowney } catch(char *ex) { 3589ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 3599ae82921SPaul Mullowney } 3609ae82921SPaul Mullowney } 3619ae82921SPaul Mullowney PetscFunctionReturn(0); 3629ae82921SPaul Mullowney } 3639ae82921SPaul Mullowney 3649ae82921SPaul Mullowney #undef __FUNCT__ 365087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix" 366087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) 3679ae82921SPaul Mullowney { 3689ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 3699ae82921SPaul Mullowney PetscInt n = A->rmap->n; 3709ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 371aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 3729ae82921SPaul Mullowney cusparseStatus_t stat; 3739ae82921SPaul Mullowney const PetscInt *aj = a->j,*adiag = a->diag,*vi; 3749ae82921SPaul Mullowney const MatScalar *aa = a->a,*v; 3759ae82921SPaul Mullowney PetscInt *AiUp, *AjUp; 3769ae82921SPaul Mullowney PetscScalar *AAUp; 3779ae82921SPaul Mullowney PetscInt i,nz, nzUpper, offset; 3789ae82921SPaul Mullowney PetscErrorCode ierr; 3799ae82921SPaul Mullowney 3809ae82921SPaul Mullowney PetscFunctionBegin; 381c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 3829ae82921SPaul Mullowney try { 3839ae82921SPaul Mullowney /* next, figure out the number of nonzeros in the upper triangular matrix. */ 3849ae82921SPaul Mullowney nzUpper = adiag[0]-adiag[n]; 3859ae82921SPaul Mullowney 3869ae82921SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 387c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); 388c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); 389c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); 3909ae82921SPaul Mullowney 3919ae82921SPaul Mullowney /* Fill the upper triangular matrix */ 3929ae82921SPaul Mullowney AiUp[0]=(PetscInt) 0; 3939ae82921SPaul Mullowney AiUp[n]=nzUpper; 3949ae82921SPaul Mullowney offset = nzUpper; 3959ae82921SPaul Mullowney for (i=n-1; i>=0; i--) { 3969ae82921SPaul Mullowney v = aa + adiag[i+1] + 1; 3979ae82921SPaul Mullowney vi = aj + adiag[i+1] + 1; 3989ae82921SPaul Mullowney 399e057df02SPaul Mullowney /* number of elements NOT on the diagonal */ 4009ae82921SPaul Mullowney nz = adiag[i] - adiag[i+1]-1; 4019ae82921SPaul Mullowney 402e057df02SPaul Mullowney /* decrement the offset */ 4039ae82921SPaul Mullowney offset -= (nz+1); 4049ae82921SPaul Mullowney 405e057df02SPaul Mullowney /* first, set the diagonal elements */ 4069ae82921SPaul Mullowney AjUp[offset] = (PetscInt) i; 40709f51544SAlejandro Lamas Daviña AAUp[offset] = (MatScalar)1./v[nz]; 4089ae82921SPaul Mullowney AiUp[i] = AiUp[i+1] - (nz+1); 4099ae82921SPaul Mullowney 4109ae82921SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); 4119ae82921SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 4129ae82921SPaul Mullowney } 4132205254eSKarl Rupp 414aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 415aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 4162205254eSKarl Rupp 417aa372e3fSPaul Mullowney /* Create the matrix description */ 418c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); 419c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 420c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 421c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); 422c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); 423aa372e3fSPaul Mullowney 424aa372e3fSPaul Mullowney /* Create the solve analysis information */ 425c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); 426aa372e3fSPaul Mullowney 427aa372e3fSPaul Mullowney /* set the operation */ 428aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 429aa372e3fSPaul Mullowney 430aa372e3fSPaul Mullowney /* set the matrix */ 431aa372e3fSPaul Mullowney upTriFactor->csrMat = new CsrMatrix; 432aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = n; 433aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols = n; 434aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = nzUpper; 435aa372e3fSPaul Mullowney 436aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); 437aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); 438aa372e3fSPaul Mullowney 439aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); 440aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); 441aa372e3fSPaul Mullowney 442aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); 443aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); 444aa372e3fSPaul Mullowney 445aa372e3fSPaul Mullowney /* perform the solve analysis */ 446aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 447aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 448aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 449c41cb2e2SAlejandro Lamas Daviña upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); 450aa372e3fSPaul Mullowney 451aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 452aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 4532205254eSKarl Rupp 454c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr); 455c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr); 456c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr); 4579ae82921SPaul Mullowney } catch(char *ex) { 4589ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 4599ae82921SPaul Mullowney } 4609ae82921SPaul Mullowney } 4619ae82921SPaul Mullowney PetscFunctionReturn(0); 4629ae82921SPaul Mullowney } 4639ae82921SPaul Mullowney 4649ae82921SPaul Mullowney #undef __FUNCT__ 465087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU" 466087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) 4679ae82921SPaul Mullowney { 4689ae82921SPaul Mullowney PetscErrorCode ierr; 4699ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 4709ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 4719ae82921SPaul Mullowney IS isrow = a->row,iscol = a->icol; 4729ae82921SPaul Mullowney PetscBool row_identity,col_identity; 4739ae82921SPaul Mullowney const PetscInt *r,*c; 4749ae82921SPaul Mullowney PetscInt n = A->rmap->n; 4759ae82921SPaul Mullowney 4769ae82921SPaul Mullowney PetscFunctionBegin; 477087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr); 478087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr); 4792205254eSKarl Rupp 480aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 481aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 482aa372e3fSPaul Mullowney cusparseTriFactors->nnz=a->nz; 4839ae82921SPaul Mullowney 484c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 485e057df02SPaul Mullowney /*lower triangular indices */ 4869ae82921SPaul Mullowney ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); 4879ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 4882205254eSKarl Rupp if (!row_identity) { 489aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 490aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(r, r+n); 4912205254eSKarl Rupp } 4929ae82921SPaul Mullowney ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); 4939ae82921SPaul Mullowney 494e057df02SPaul Mullowney /*upper triangular indices */ 4959ae82921SPaul Mullowney ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); 4969ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 4972205254eSKarl Rupp if (!col_identity) { 498aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 499aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(c, c+n); 5002205254eSKarl Rupp } 5019ae82921SPaul Mullowney ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); 5029ae82921SPaul Mullowney PetscFunctionReturn(0); 5039ae82921SPaul Mullowney } 5049ae82921SPaul Mullowney 5059ae82921SPaul Mullowney #undef __FUNCT__ 506087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices" 507087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) 508087f3262SPaul Mullowney { 509087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 510087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 511aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 512aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 513087f3262SPaul Mullowney cusparseStatus_t stat; 514087f3262SPaul Mullowney PetscErrorCode ierr; 515087f3262SPaul Mullowney PetscInt *AiUp, *AjUp; 516087f3262SPaul Mullowney PetscScalar *AAUp; 517087f3262SPaul Mullowney PetscScalar *AALo; 518087f3262SPaul Mullowney PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; 519087f3262SPaul Mullowney Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; 520087f3262SPaul Mullowney const PetscInt *ai = b->i,*aj = b->j,*vj; 521087f3262SPaul Mullowney const MatScalar *aa = b->a,*v; 522087f3262SPaul Mullowney 523087f3262SPaul Mullowney PetscFunctionBegin; 524c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 525087f3262SPaul Mullowney try { 526087f3262SPaul Mullowney /* Allocate Space for the upper triangular matrix */ 527c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); 528c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); 529c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); 530c41cb2e2SAlejandro Lamas Daviña ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); 531087f3262SPaul Mullowney 532087f3262SPaul Mullowney /* Fill the upper triangular matrix */ 533087f3262SPaul Mullowney AiUp[0]=(PetscInt) 0; 534087f3262SPaul Mullowney AiUp[n]=nzUpper; 535087f3262SPaul Mullowney offset = 0; 536087f3262SPaul Mullowney for (i=0; i<n; i++) { 537087f3262SPaul Mullowney /* set the pointers */ 538087f3262SPaul Mullowney v = aa + ai[i]; 539087f3262SPaul Mullowney vj = aj + ai[i]; 540087f3262SPaul Mullowney nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ 541087f3262SPaul Mullowney 542087f3262SPaul Mullowney /* first, set the diagonal elements */ 543087f3262SPaul Mullowney AjUp[offset] = (PetscInt) i; 54409f51544SAlejandro Lamas Daviña AAUp[offset] = (MatScalar)1.0/v[nz]; 545087f3262SPaul Mullowney AiUp[i] = offset; 54609f51544SAlejandro Lamas Daviña AALo[offset] = (MatScalar)1.0/v[nz]; 547087f3262SPaul Mullowney 548087f3262SPaul Mullowney offset+=1; 549087f3262SPaul Mullowney if (nz>0) { 550087f3262SPaul Mullowney ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); 551087f3262SPaul Mullowney ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); 552087f3262SPaul Mullowney for (j=offset; j<offset+nz; j++) { 553087f3262SPaul Mullowney AAUp[j] = -AAUp[j]; 554087f3262SPaul Mullowney AALo[j] = AAUp[j]/v[nz]; 555087f3262SPaul Mullowney } 556087f3262SPaul Mullowney offset+=nz; 557087f3262SPaul Mullowney } 558087f3262SPaul Mullowney } 559087f3262SPaul Mullowney 560aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 561aa372e3fSPaul Mullowney upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 562087f3262SPaul Mullowney 563aa372e3fSPaul Mullowney /* Create the matrix description */ 564c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); 565c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 566c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 567c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); 568c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); 569087f3262SPaul Mullowney 570aa372e3fSPaul Mullowney /* Create the solve analysis information */ 571c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); 572aa372e3fSPaul Mullowney 573aa372e3fSPaul Mullowney /* set the operation */ 574aa372e3fSPaul Mullowney upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 575aa372e3fSPaul Mullowney 576aa372e3fSPaul Mullowney /* set the matrix */ 577aa372e3fSPaul Mullowney upTriFactor->csrMat = new CsrMatrix; 578aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows = A->rmap->n; 579aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols = A->cmap->n; 580aa372e3fSPaul Mullowney upTriFactor->csrMat->num_entries = a->nz; 581aa372e3fSPaul Mullowney 582aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 583aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 584aa372e3fSPaul Mullowney 585aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 586aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 587aa372e3fSPaul Mullowney 588aa372e3fSPaul Mullowney upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 589aa372e3fSPaul Mullowney upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); 590aa372e3fSPaul Mullowney 591aa372e3fSPaul Mullowney /* perform the solve analysis */ 592aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, 593aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, 594aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), 595c41cb2e2SAlejandro Lamas Daviña upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); 596aa372e3fSPaul Mullowney 597aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 598aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; 599aa372e3fSPaul Mullowney 600aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 601aa372e3fSPaul Mullowney loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; 602aa372e3fSPaul Mullowney 603aa372e3fSPaul Mullowney /* Create the matrix description */ 604c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); 605c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 606c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); 607c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); 608c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); 609aa372e3fSPaul Mullowney 610aa372e3fSPaul Mullowney /* Create the solve analysis information */ 611c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); 612aa372e3fSPaul Mullowney 613aa372e3fSPaul Mullowney /* set the operation */ 614aa372e3fSPaul Mullowney loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; 615aa372e3fSPaul Mullowney 616aa372e3fSPaul Mullowney /* set the matrix */ 617aa372e3fSPaul Mullowney loTriFactor->csrMat = new CsrMatrix; 618aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows = A->rmap->n; 619aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols = A->cmap->n; 620aa372e3fSPaul Mullowney loTriFactor->csrMat->num_entries = a->nz; 621aa372e3fSPaul Mullowney 622aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 623aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); 624aa372e3fSPaul Mullowney 625aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); 626aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); 627aa372e3fSPaul Mullowney 628aa372e3fSPaul Mullowney loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); 629aa372e3fSPaul Mullowney loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); 630aa372e3fSPaul Mullowney 631aa372e3fSPaul Mullowney /* perform the solve analysis */ 632aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, 633aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, 634aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), 635c41cb2e2SAlejandro Lamas Daviña loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); 636aa372e3fSPaul Mullowney 637aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 638aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; 639087f3262SPaul Mullowney 640c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 641c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr); 642c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr); 643c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr); 644c41cb2e2SAlejandro Lamas Daviña ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr); 645087f3262SPaul Mullowney } catch(char *ex) { 646087f3262SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 647087f3262SPaul Mullowney } 648087f3262SPaul Mullowney } 649087f3262SPaul Mullowney PetscFunctionReturn(0); 650087f3262SPaul Mullowney } 651087f3262SPaul Mullowney 652087f3262SPaul Mullowney #undef __FUNCT__ 653087f3262SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" 654087f3262SPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) 6559ae82921SPaul Mullowney { 6569ae82921SPaul Mullowney PetscErrorCode ierr; 657087f3262SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 658087f3262SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 659087f3262SPaul Mullowney IS ip = a->row; 660087f3262SPaul Mullowney const PetscInt *rip; 661087f3262SPaul Mullowney PetscBool perm_identity; 662087f3262SPaul Mullowney PetscInt n = A->rmap->n; 663087f3262SPaul Mullowney 664087f3262SPaul Mullowney PetscFunctionBegin; 665087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); 666aa372e3fSPaul Mullowney cusparseTriFactors->workVector = new THRUSTARRAY; 667aa372e3fSPaul Mullowney cusparseTriFactors->workVector->resize(n); 668aa372e3fSPaul Mullowney cusparseTriFactors->nnz=(a->nz-n)*2 + n; 669aa372e3fSPaul Mullowney 670087f3262SPaul Mullowney /*lower triangular indices */ 671087f3262SPaul Mullowney ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); 672087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 673087f3262SPaul Mullowney if (!perm_identity) { 674aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); 675aa372e3fSPaul Mullowney cusparseTriFactors->rpermIndices->assign(rip, rip+n); 676aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); 677aa372e3fSPaul Mullowney cusparseTriFactors->cpermIndices->assign(rip, rip+n); 678087f3262SPaul Mullowney } 679087f3262SPaul Mullowney ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); 680087f3262SPaul Mullowney PetscFunctionReturn(0); 681087f3262SPaul Mullowney } 682087f3262SPaul Mullowney 683087f3262SPaul Mullowney #undef __FUNCT__ 6849ae82921SPaul Mullowney #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" 6856fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 6869ae82921SPaul Mullowney { 6879ae82921SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 6889ae82921SPaul Mullowney IS isrow = b->row,iscol = b->col; 6899ae82921SPaul Mullowney PetscBool row_identity,col_identity; 690b175d8bbSPaul Mullowney PetscErrorCode ierr; 6919ae82921SPaul Mullowney 6929ae82921SPaul Mullowney PetscFunctionBegin; 6939ae82921SPaul Mullowney ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 694e057df02SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 6959ae82921SPaul Mullowney ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); 6969ae82921SPaul Mullowney ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); 697bda325fcSPaul Mullowney if (row_identity && col_identity) { 698bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 699bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 700bda325fcSPaul Mullowney } else { 701bda325fcSPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 702bda325fcSPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 703bda325fcSPaul Mullowney } 7048dc1d2a3SPaul Mullowney 705e057df02SPaul Mullowney /* get the triangular factors */ 706087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 7079ae82921SPaul Mullowney PetscFunctionReturn(0); 7089ae82921SPaul Mullowney } 7099ae82921SPaul Mullowney 710087f3262SPaul Mullowney #undef __FUNCT__ 711087f3262SPaul Mullowney #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" 712087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) 713087f3262SPaul Mullowney { 714087f3262SPaul Mullowney Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; 715087f3262SPaul Mullowney IS ip = b->row; 716087f3262SPaul Mullowney PetscBool perm_identity; 717b175d8bbSPaul Mullowney PetscErrorCode ierr; 718087f3262SPaul Mullowney 719087f3262SPaul Mullowney PetscFunctionBegin; 720087f3262SPaul Mullowney ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); 721087f3262SPaul Mullowney 722087f3262SPaul Mullowney /* determine which version of MatSolve needs to be used. */ 723087f3262SPaul Mullowney ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); 724087f3262SPaul Mullowney if (perm_identity) { 725087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; 726087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; 727087f3262SPaul Mullowney } else { 728087f3262SPaul Mullowney B->ops->solve = MatSolve_SeqAIJCUSPARSE; 729087f3262SPaul Mullowney B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; 730087f3262SPaul Mullowney } 731087f3262SPaul Mullowney 732087f3262SPaul Mullowney /* get the triangular factors */ 733087f3262SPaul Mullowney ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); 734087f3262SPaul Mullowney PetscFunctionReturn(0); 735087f3262SPaul Mullowney } 7369ae82921SPaul Mullowney 737bda325fcSPaul Mullowney #undef __FUNCT__ 738bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve" 739b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) 740bda325fcSPaul Mullowney { 741bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 742aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 743aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 744aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 745aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 746bda325fcSPaul Mullowney cusparseStatus_t stat; 747aa372e3fSPaul Mullowney cusparseIndexBase_t indexBase; 748aa372e3fSPaul Mullowney cusparseMatrixType_t matrixType; 749aa372e3fSPaul Mullowney cusparseFillMode_t fillMode; 750aa372e3fSPaul Mullowney cusparseDiagType_t diagType; 751b175d8bbSPaul Mullowney 752bda325fcSPaul Mullowney PetscFunctionBegin; 753bda325fcSPaul Mullowney 754aa372e3fSPaul Mullowney /*********************************************/ 755aa372e3fSPaul Mullowney /* Now the Transpose of the Lower Tri Factor */ 756aa372e3fSPaul Mullowney /*********************************************/ 757aa372e3fSPaul Mullowney 758aa372e3fSPaul Mullowney /* allocate space for the transpose of the lower triangular factor */ 759aa372e3fSPaul Mullowney loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 760aa372e3fSPaul Mullowney 761aa372e3fSPaul Mullowney /* set the matrix descriptors of the lower triangular factor */ 762aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(loTriFactor->descr); 763aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(loTriFactor->descr); 764aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 765aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 766aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(loTriFactor->descr); 767aa372e3fSPaul Mullowney 768aa372e3fSPaul Mullowney /* Create the matrix description */ 769c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUDA(stat); 770c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUDA(stat); 771c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUDA(stat); 772c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUDA(stat); 773c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUDA(stat); 774aa372e3fSPaul Mullowney 775aa372e3fSPaul Mullowney /* Create the solve analysis information */ 776c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUDA(stat); 777aa372e3fSPaul Mullowney 778aa372e3fSPaul Mullowney /* set the operation */ 779aa372e3fSPaul Mullowney loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 780aa372e3fSPaul Mullowney 781aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the lower triangular factor*/ 782aa372e3fSPaul Mullowney loTriFactorT->csrMat = new CsrMatrix; 783aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows; 784aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols; 785aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; 786aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1); 787aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries); 788aa372e3fSPaul Mullowney loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries); 789aa372e3fSPaul Mullowney 790aa372e3fSPaul Mullowney /* compute the transpose of the lower triangular factor, i.e. the CSC */ 791aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, 792aa372e3fSPaul Mullowney loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, 793aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 794aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 795aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 796aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 797aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 798aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 799c41cb2e2SAlejandro Lamas Daviña CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); 800aa372e3fSPaul Mullowney 801aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 802aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, 803aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, 804aa372e3fSPaul Mullowney loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), 805aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), 806c41cb2e2SAlejandro Lamas Daviña loTriFactorT->solveInfo);CHKERRCUDA(stat); 807aa372e3fSPaul Mullowney 808aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 809aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; 810aa372e3fSPaul Mullowney 811aa372e3fSPaul Mullowney /*********************************************/ 812aa372e3fSPaul Mullowney /* Now the Transpose of the Upper Tri Factor */ 813aa372e3fSPaul Mullowney /*********************************************/ 814aa372e3fSPaul Mullowney 815aa372e3fSPaul Mullowney /* allocate space for the transpose of the upper triangular factor */ 816aa372e3fSPaul Mullowney upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; 817aa372e3fSPaul Mullowney 818aa372e3fSPaul Mullowney /* set the matrix descriptors of the upper triangular factor */ 819aa372e3fSPaul Mullowney matrixType = cusparseGetMatType(upTriFactor->descr); 820aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(upTriFactor->descr); 821aa372e3fSPaul Mullowney fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? 822aa372e3fSPaul Mullowney CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; 823aa372e3fSPaul Mullowney diagType = cusparseGetMatDiagType(upTriFactor->descr); 824aa372e3fSPaul Mullowney 825aa372e3fSPaul Mullowney /* Create the matrix description */ 826c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUDA(stat); 827c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUDA(stat); 828c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUDA(stat); 829c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUDA(stat); 830c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUDA(stat); 831aa372e3fSPaul Mullowney 832aa372e3fSPaul Mullowney /* Create the solve analysis information */ 833c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUDA(stat); 834aa372e3fSPaul Mullowney 835aa372e3fSPaul Mullowney /* set the operation */ 836aa372e3fSPaul Mullowney upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; 837aa372e3fSPaul Mullowney 838aa372e3fSPaul Mullowney /* allocate GPU space for the CSC of the upper triangular factor*/ 839aa372e3fSPaul Mullowney upTriFactorT->csrMat = new CsrMatrix; 840aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows; 841aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols; 842aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; 843aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1); 844aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries); 845aa372e3fSPaul Mullowney upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries); 846aa372e3fSPaul Mullowney 847aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. the CSC */ 848aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, 849aa372e3fSPaul Mullowney upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, 850aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 851aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 852aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 853aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 854aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 855aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 856c41cb2e2SAlejandro Lamas Daviña CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); 857aa372e3fSPaul Mullowney 858aa372e3fSPaul Mullowney /* perform the solve analysis on the transposed matrix */ 859aa372e3fSPaul Mullowney stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, 860aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, 861aa372e3fSPaul Mullowney upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), 862aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), 863c41cb2e2SAlejandro Lamas Daviña upTriFactorT->solveInfo);CHKERRCUDA(stat); 864aa372e3fSPaul Mullowney 865aa372e3fSPaul Mullowney /* assign the pointer. Is this really necessary? */ 866aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; 867bda325fcSPaul Mullowney PetscFunctionReturn(0); 868bda325fcSPaul Mullowney } 869bda325fcSPaul Mullowney 870bda325fcSPaul Mullowney #undef __FUNCT__ 871bda325fcSPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult" 872b175d8bbSPaul Mullowney static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A) 873bda325fcSPaul Mullowney { 874aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 875aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 876aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 877bda325fcSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 878bda325fcSPaul Mullowney cusparseStatus_t stat; 879aa372e3fSPaul Mullowney cusparseIndexBase_t indexBase; 880b06137fdSPaul Mullowney cudaError_t err; 881b175d8bbSPaul Mullowney 882bda325fcSPaul Mullowney PetscFunctionBegin; 883aa372e3fSPaul Mullowney 884aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 885aa372e3fSPaul Mullowney matstructT = new Mat_SeqAIJCUSPARSEMultStruct; 886c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUDA(stat); 887aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 888c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUDA(stat); 889c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); 890aa372e3fSPaul Mullowney 891b06137fdSPaul Mullowney /* set alpha and beta */ 892c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUDA(err); 893c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 894c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUDA(err); 895c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 896c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); 897b06137fdSPaul Mullowney 898aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 899aa372e3fSPaul Mullowney CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; 900aa372e3fSPaul Mullowney CsrMatrix *matrixT= new CsrMatrix; 901aa372e3fSPaul Mullowney matrixT->num_rows = A->rmap->n; 902aa372e3fSPaul Mullowney matrixT->num_cols = A->cmap->n; 903aa372e3fSPaul Mullowney matrixT->num_entries = a->nz; 904aa372e3fSPaul Mullowney matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 905aa372e3fSPaul Mullowney matrixT->column_indices = new THRUSTINTARRAY32(a->nz); 906aa372e3fSPaul Mullowney matrixT->values = new THRUSTARRAY(a->nz); 907aa372e3fSPaul Mullowney 908aa372e3fSPaul Mullowney /* compute the transpose of the upper triangular factor, i.e. the CSC */ 909aa372e3fSPaul Mullowney indexBase = cusparseGetMatIndexBase(matstruct->descr); 910aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows, 911aa372e3fSPaul Mullowney matrix->num_cols, matrix->num_entries, 912aa372e3fSPaul Mullowney matrix->values->data().get(), 913aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 914aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 915aa372e3fSPaul Mullowney matrixT->values->data().get(), 916aa372e3fSPaul Mullowney matrixT->column_indices->data().get(), 917aa372e3fSPaul Mullowney matrixT->row_offsets->data().get(), 918c41cb2e2SAlejandro Lamas Daviña CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); 919aa372e3fSPaul Mullowney 920aa372e3fSPaul Mullowney /* assign the pointer */ 921aa372e3fSPaul Mullowney matstructT->mat = matrixT; 922aa372e3fSPaul Mullowney 923aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 9242692e278SPaul Mullowney #if CUDA_VERSION>=5000 925aa372e3fSPaul Mullowney /* First convert HYB to CSR */ 926aa372e3fSPaul Mullowney CsrMatrix *temp= new CsrMatrix; 927aa372e3fSPaul Mullowney temp->num_rows = A->rmap->n; 928aa372e3fSPaul Mullowney temp->num_cols = A->cmap->n; 929aa372e3fSPaul Mullowney temp->num_entries = a->nz; 930aa372e3fSPaul Mullowney temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 931aa372e3fSPaul Mullowney temp->column_indices = new THRUSTINTARRAY32(a->nz); 932aa372e3fSPaul Mullowney temp->values = new THRUSTARRAY(a->nz); 933aa372e3fSPaul Mullowney 9342692e278SPaul Mullowney 935aa372e3fSPaul Mullowney stat = cusparse_hyb2csr(cusparsestruct->handle, 936aa372e3fSPaul Mullowney matstruct->descr, (cusparseHybMat_t)matstruct->mat, 937aa372e3fSPaul Mullowney temp->values->data().get(), 938aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 939c41cb2e2SAlejandro Lamas Daviña temp->column_indices->data().get());CHKERRCUDA(stat); 940aa372e3fSPaul Mullowney 941aa372e3fSPaul Mullowney /* Next, convert CSR to CSC (i.e. the matrix transpose) */ 942aa372e3fSPaul Mullowney CsrMatrix *tempT= new CsrMatrix; 943aa372e3fSPaul Mullowney tempT->num_rows = A->rmap->n; 944aa372e3fSPaul Mullowney tempT->num_cols = A->cmap->n; 945aa372e3fSPaul Mullowney tempT->num_entries = a->nz; 946aa372e3fSPaul Mullowney tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); 947aa372e3fSPaul Mullowney tempT->column_indices = new THRUSTINTARRAY32(a->nz); 948aa372e3fSPaul Mullowney tempT->values = new THRUSTARRAY(a->nz); 949aa372e3fSPaul Mullowney 950aa372e3fSPaul Mullowney stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, 951aa372e3fSPaul Mullowney temp->num_cols, temp->num_entries, 952aa372e3fSPaul Mullowney temp->values->data().get(), 953aa372e3fSPaul Mullowney temp->row_offsets->data().get(), 954aa372e3fSPaul Mullowney temp->column_indices->data().get(), 955aa372e3fSPaul Mullowney tempT->values->data().get(), 956aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 957aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 958c41cb2e2SAlejandro Lamas Daviña CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); 959aa372e3fSPaul Mullowney 960aa372e3fSPaul Mullowney /* Last, convert CSC to HYB */ 961aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 962c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); 963aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 964aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 965aa372e3fSPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, 966aa372e3fSPaul Mullowney matstructT->descr, tempT->values->data().get(), 967aa372e3fSPaul Mullowney tempT->row_offsets->data().get(), 968aa372e3fSPaul Mullowney tempT->column_indices->data().get(), 969c41cb2e2SAlejandro Lamas Daviña hybMat, 0, partition);CHKERRCUDA(stat); 970aa372e3fSPaul Mullowney 971aa372e3fSPaul Mullowney /* assign the pointer */ 972aa372e3fSPaul Mullowney matstructT->mat = hybMat; 973aa372e3fSPaul Mullowney 974aa372e3fSPaul Mullowney /* delete temporaries */ 975aa372e3fSPaul Mullowney if (tempT) { 976aa372e3fSPaul Mullowney if (tempT->values) delete (THRUSTARRAY*) tempT->values; 977aa372e3fSPaul Mullowney if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; 978aa372e3fSPaul Mullowney if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; 979aa372e3fSPaul Mullowney delete (CsrMatrix*) tempT; 980087f3262SPaul Mullowney } 981aa372e3fSPaul Mullowney if (temp) { 982aa372e3fSPaul Mullowney if (temp->values) delete (THRUSTARRAY*) temp->values; 983aa372e3fSPaul Mullowney if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; 984aa372e3fSPaul Mullowney if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; 985aa372e3fSPaul Mullowney delete (CsrMatrix*) temp; 986aa372e3fSPaul Mullowney } 9872692e278SPaul Mullowney #else 9882692e278SPaul Mullowney SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later."); 9892692e278SPaul Mullowney #endif 990aa372e3fSPaul Mullowney } 991aa372e3fSPaul Mullowney /* assign the compressed row indices */ 992aa372e3fSPaul Mullowney matstructT->cprowIndices = new THRUSTINTARRAY; 993aa372e3fSPaul Mullowney 994aa372e3fSPaul Mullowney /* assign the pointer */ 995aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; 996bda325fcSPaul Mullowney PetscFunctionReturn(0); 997bda325fcSPaul Mullowney } 998bda325fcSPaul Mullowney 999bda325fcSPaul Mullowney #undef __FUNCT__ 1000bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE" 10016fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 1002bda325fcSPaul Mullowney { 1003c41cb2e2SAlejandro Lamas Daviña PetscInt n = xx->map->n; 1004c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 1005c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> xGPU,bGPU; 1006bda325fcSPaul Mullowney cusparseStatus_t stat; 1007bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1008aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1009aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1010aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1011b175d8bbSPaul Mullowney PetscErrorCode ierr; 1012bda325fcSPaul Mullowney 1013bda325fcSPaul Mullowney PetscFunctionBegin; 1014aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... on the fly */ 1015aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1016bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1017aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1018aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1019bda325fcSPaul Mullowney } 1020bda325fcSPaul Mullowney 1021bda325fcSPaul Mullowney /* Get the GPU pointers */ 1022c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1023c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1024c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1025c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 1026bda325fcSPaul Mullowney 1027aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1028c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1029c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), 1030c41cb2e2SAlejandro Lamas Daviña xGPU); 1031aa372e3fSPaul Mullowney 1032aa372e3fSPaul Mullowney /* First, solve U */ 1033aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1034aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1035aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1036aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1037aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1038aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1039c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 1040aa372e3fSPaul Mullowney 1041aa372e3fSPaul Mullowney /* Then, solve L */ 1042aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1043aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1044aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1045aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1046aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1047aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1048c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1049aa372e3fSPaul Mullowney 1050aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1051c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1052c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), 1053aa372e3fSPaul Mullowney tempGPU->begin()); 1054aa372e3fSPaul Mullowney 1055aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. */ 1056c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 1057bda325fcSPaul Mullowney 1058bda325fcSPaul Mullowney /* restore */ 1059c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1060c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1061c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1062087f3262SPaul Mullowney 1063aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1064bda325fcSPaul Mullowney PetscFunctionReturn(0); 1065bda325fcSPaul Mullowney } 1066bda325fcSPaul Mullowney 1067bda325fcSPaul Mullowney #undef __FUNCT__ 1068bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" 10696fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 1070bda325fcSPaul Mullowney { 1071c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 1072bda325fcSPaul Mullowney cusparseStatus_t stat; 1073bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1074aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1075aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1076aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1077b175d8bbSPaul Mullowney PetscErrorCode ierr; 1078bda325fcSPaul Mullowney 1079bda325fcSPaul Mullowney PetscFunctionBegin; 1080aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... on the fly */ 1081aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1082bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1083aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1084aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1085bda325fcSPaul Mullowney } 1086bda325fcSPaul Mullowney 1087bda325fcSPaul Mullowney /* Get the GPU pointers */ 1088c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1089c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1090bda325fcSPaul Mullowney 1091aa372e3fSPaul Mullowney /* First, solve U */ 1092aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1093aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1094aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1095aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1096aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1097aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1098c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1099aa372e3fSPaul Mullowney 1100aa372e3fSPaul Mullowney /* Then, solve L */ 1101aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1102aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1103aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1104aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1105aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1106aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1107c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1108bda325fcSPaul Mullowney 1109bda325fcSPaul Mullowney /* restore */ 1110c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1111c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1112c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1113aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1114bda325fcSPaul Mullowney PetscFunctionReturn(0); 1115bda325fcSPaul Mullowney } 1116bda325fcSPaul Mullowney 11179ae82921SPaul Mullowney #undef __FUNCT__ 11189ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" 11196fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 11209ae82921SPaul Mullowney { 1121c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 1122c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> xGPU,bGPU; 11239ae82921SPaul Mullowney cusparseStatus_t stat; 11249ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1125aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1126aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1127aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1128b175d8bbSPaul Mullowney PetscErrorCode ierr; 1129ebc8f436SDominic Meiser VecType t; 1130ebc8f436SDominic Meiser PetscBool flg; 11319ae82921SPaul Mullowney 11329ae82921SPaul Mullowney PetscFunctionBegin; 1133ebc8f436SDominic Meiser ierr = VecGetType(bb,&t);CHKERRQ(ierr); 1134c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 1135c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUDA); 1136ebc8f436SDominic Meiser ierr = VecGetType(xx,&t);CHKERRQ(ierr); 1137c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 1138c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). Can only deal with %s\n.",t,VECSEQCUDA); 1139ebc8f436SDominic Meiser 1140e057df02SPaul Mullowney /* Get the GPU pointers */ 1141c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1142c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1143c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1144c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 11459ae82921SPaul Mullowney 1146aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1147c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1148c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), 1149c41cb2e2SAlejandro Lamas Daviña xGPU); 1150aa372e3fSPaul Mullowney 1151aa372e3fSPaul Mullowney /* Next, solve L */ 1152aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1153aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1154aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1155aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1156aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1157aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1158c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 1159aa372e3fSPaul Mullowney 1160aa372e3fSPaul Mullowney /* Then, solve U */ 1161aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1162aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1163aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1164aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1165aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1166aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1167c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1168aa372e3fSPaul Mullowney 1169aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1170c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1171c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->end()), 1172aa372e3fSPaul Mullowney tempGPU->begin()); 1173aa372e3fSPaul Mullowney 1174aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. */ 1175c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 11769ae82921SPaul Mullowney 1177c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1178c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1179c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1180aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 11819ae82921SPaul Mullowney PetscFunctionReturn(0); 11829ae82921SPaul Mullowney } 11839ae82921SPaul Mullowney 11849ae82921SPaul Mullowney #undef __FUNCT__ 11859ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" 11866fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 11879ae82921SPaul Mullowney { 1188c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray, *barray; 11899ae82921SPaul Mullowney cusparseStatus_t stat; 11909ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1191aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1192aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1193aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1194b175d8bbSPaul Mullowney PetscErrorCode ierr; 11959ae82921SPaul Mullowney 11969ae82921SPaul Mullowney PetscFunctionBegin; 1197e057df02SPaul Mullowney /* Get the GPU pointers */ 1198c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1199c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 12009ae82921SPaul Mullowney 1201aa372e3fSPaul Mullowney /* First, solve L */ 1202aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1203aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1204aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1205aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1206aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1207aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1208c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1209aa372e3fSPaul Mullowney 1210aa372e3fSPaul Mullowney /* Next, solve U */ 1211aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1212aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1213aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1214aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1215aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1216aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1217c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 12189ae82921SPaul Mullowney 1219c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1220c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1221c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1222aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 12239ae82921SPaul Mullowney PetscFunctionReturn(0); 12249ae82921SPaul Mullowney } 12259ae82921SPaul Mullowney 12269ae82921SPaul Mullowney #undef __FUNCT__ 1227e057df02SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" 12286fa9248bSJed Brown static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) 12299ae82921SPaul Mullowney { 12309ae82921SPaul Mullowney 1231aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1232aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 12339ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 12349ae82921SPaul Mullowney PetscInt m = A->rmap->n,*ii,*ridx; 12359ae82921SPaul Mullowney PetscErrorCode ierr; 1236aa372e3fSPaul Mullowney cusparseStatus_t stat; 1237b06137fdSPaul Mullowney cudaError_t err; 12389ae82921SPaul Mullowney 12399ae82921SPaul Mullowney PetscFunctionBegin; 1240c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 12419ae82921SPaul Mullowney ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 1242ce814652SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); 12439ae82921SPaul Mullowney try { 1244aa372e3fSPaul Mullowney cusparsestruct->nonzerorow=0; 1245aa372e3fSPaul Mullowney for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); 12469ae82921SPaul Mullowney 12479ae82921SPaul Mullowney if (a->compressedrow.use) { 12489ae82921SPaul Mullowney m = a->compressedrow.nrows; 12499ae82921SPaul Mullowney ii = a->compressedrow.i; 12509ae82921SPaul Mullowney ridx = a->compressedrow.rindex; 12519ae82921SPaul Mullowney } else { 1252b06137fdSPaul Mullowney /* Forcing compressed row on the GPU */ 12539ae82921SPaul Mullowney int k=0; 1254854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); 1255854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); 12569ae82921SPaul Mullowney ii[0]=0; 12579ae82921SPaul Mullowney for (int j = 0; j<m; j++) { 12589ae82921SPaul Mullowney if ((a->i[j+1]-a->i[j])>0) { 12599ae82921SPaul Mullowney ii[k] = a->i[j]; 12609ae82921SPaul Mullowney ridx[k]= j; 12619ae82921SPaul Mullowney k++; 12629ae82921SPaul Mullowney } 12639ae82921SPaul Mullowney } 1264aa372e3fSPaul Mullowney ii[cusparsestruct->nonzerorow] = a->nz; 1265aa372e3fSPaul Mullowney m = cusparsestruct->nonzerorow; 12669ae82921SPaul Mullowney } 12679ae82921SPaul Mullowney 1268aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 1269aa372e3fSPaul Mullowney matstruct = new Mat_SeqAIJCUSPARSEMultStruct; 1270c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUDA(stat); 1271c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 1272c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); 12739ae82921SPaul Mullowney 1274c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUDA(err); 1275c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1276c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUDA(err); 1277c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1278c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); 1279b06137fdSPaul Mullowney 1280aa372e3fSPaul Mullowney /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ 1281aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1282aa372e3fSPaul Mullowney /* set the matrix */ 1283aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1284a65300a6SPaul Mullowney matrix->num_rows = m; 1285aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1286aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1287a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1288a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 12899ae82921SPaul Mullowney 1290aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1291aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1292aa372e3fSPaul Mullowney 1293aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1294aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1295aa372e3fSPaul Mullowney 1296aa372e3fSPaul Mullowney /* assign the pointer */ 1297aa372e3fSPaul Mullowney matstruct->mat = matrix; 1298aa372e3fSPaul Mullowney 1299aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 13002692e278SPaul Mullowney #if CUDA_VERSION>=4020 1301aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1302a65300a6SPaul Mullowney matrix->num_rows = m; 1303aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1304aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1305a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1306a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 1307aa372e3fSPaul Mullowney 1308aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1309aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1310aa372e3fSPaul Mullowney 1311aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1312aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1313aa372e3fSPaul Mullowney 1314aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 1315c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); 1316aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 1317aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 1318a65300a6SPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, 1319aa372e3fSPaul Mullowney matstruct->descr, matrix->values->data().get(), 1320aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 1321aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 1322c41cb2e2SAlejandro Lamas Daviña hybMat, 0, partition);CHKERRCUDA(stat); 1323aa372e3fSPaul Mullowney /* assign the pointer */ 1324aa372e3fSPaul Mullowney matstruct->mat = hybMat; 1325aa372e3fSPaul Mullowney 1326aa372e3fSPaul Mullowney if (matrix) { 1327aa372e3fSPaul Mullowney if (matrix->values) delete (THRUSTARRAY*)matrix->values; 1328aa372e3fSPaul Mullowney if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; 1329aa372e3fSPaul Mullowney if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; 1330aa372e3fSPaul Mullowney delete (CsrMatrix*)matrix; 1331087f3262SPaul Mullowney } 13322692e278SPaul Mullowney #endif 1333087f3262SPaul Mullowney } 1334ca45077fSPaul Mullowney 1335aa372e3fSPaul Mullowney /* assign the compressed row indices */ 1336aa372e3fSPaul Mullowney matstruct->cprowIndices = new THRUSTINTARRAY(m); 1337aa372e3fSPaul Mullowney matstruct->cprowIndices->assign(ridx,ridx+m); 1338aa372e3fSPaul Mullowney 1339aa372e3fSPaul Mullowney /* assign the pointer */ 1340aa372e3fSPaul Mullowney cusparsestruct->mat = matstruct; 1341aa372e3fSPaul Mullowney 13429ae82921SPaul Mullowney if (!a->compressedrow.use) { 13439ae82921SPaul Mullowney ierr = PetscFree(ii);CHKERRQ(ierr); 13449ae82921SPaul Mullowney ierr = PetscFree(ridx);CHKERRQ(ierr); 13459ae82921SPaul Mullowney } 1346aa372e3fSPaul Mullowney cusparsestruct->workVector = new THRUSTARRAY; 1347aa372e3fSPaul Mullowney cusparsestruct->workVector->resize(m); 13489ae82921SPaul Mullowney } catch(char *ex) { 13499ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 13509ae82921SPaul Mullowney } 1351c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 13522205254eSKarl Rupp 1353c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 13542205254eSKarl Rupp 13559ae82921SPaul Mullowney ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 13569ae82921SPaul Mullowney } 13579ae82921SPaul Mullowney PetscFunctionReturn(0); 13589ae82921SPaul Mullowney } 13599ae82921SPaul Mullowney 13609ae82921SPaul Mullowney #undef __FUNCT__ 13612a7a6963SBarry Smith #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" 13622a7a6963SBarry Smith static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) 13639ae82921SPaul Mullowney { 13649ae82921SPaul Mullowney PetscErrorCode ierr; 136533d57670SJed Brown PetscInt rbs,cbs; 13669ae82921SPaul Mullowney 13679ae82921SPaul Mullowney PetscFunctionBegin; 136833d57670SJed Brown ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); 13699ae82921SPaul Mullowney if (right) { 1370ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); 13719ae82921SPaul Mullowney ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 137233d57670SJed Brown ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); 1373c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*right,VECSEQCUDA);CHKERRQ(ierr); 13749ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); 13759ae82921SPaul Mullowney } 13769ae82921SPaul Mullowney if (left) { 1377ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); 13789ae82921SPaul Mullowney ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 137933d57670SJed Brown ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); 1380c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*left,VECSEQCUDA);CHKERRQ(ierr); 13819ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); 13829ae82921SPaul Mullowney } 13839ae82921SPaul Mullowney PetscFunctionReturn(0); 13849ae82921SPaul Mullowney } 13859ae82921SPaul Mullowney 1386c41cb2e2SAlejandro Lamas Daviña struct VecCUDAPlusEquals 1387aa372e3fSPaul Mullowney { 1388aa372e3fSPaul Mullowney template <typename Tuple> 1389aa372e3fSPaul Mullowney __host__ __device__ 1390aa372e3fSPaul Mullowney void operator()(Tuple t) 1391aa372e3fSPaul Mullowney { 1392aa372e3fSPaul Mullowney thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); 1393aa372e3fSPaul Mullowney } 1394aa372e3fSPaul Mullowney }; 1395aa372e3fSPaul Mullowney 13969ae82921SPaul Mullowney #undef __FUNCT__ 13979ae82921SPaul Mullowney #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" 13986fa9248bSJed Brown static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 13999ae82921SPaul Mullowney { 14009ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1401aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1402aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1403c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*yarray; 1404b175d8bbSPaul Mullowney PetscErrorCode ierr; 1405aa372e3fSPaul Mullowney cusparseStatus_t stat; 14069ae82921SPaul Mullowney 14079ae82921SPaul Mullowney PetscFunctionBegin; 1408e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1409e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1410c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1411c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1412aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1413aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1414aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1415aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, mat->num_entries, 1416b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), 1417c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstruct->beta, 1418c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 1419aa372e3fSPaul Mullowney } else { 14202692e278SPaul Mullowney #if CUDA_VERSION>=4020 1421aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1422aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1423b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1424c41cb2e2SAlejandro Lamas Daviña xarray, matstruct->beta, 1425c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 14262692e278SPaul Mullowney #endif 14279ae82921SPaul Mullowney } 1428c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1429c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1430aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 1431c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1432ca45077fSPaul Mullowney } 1433aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 14349ae82921SPaul Mullowney PetscFunctionReturn(0); 14359ae82921SPaul Mullowney } 14369ae82921SPaul Mullowney 14379ae82921SPaul Mullowney #undef __FUNCT__ 1438ca45077fSPaul Mullowney #define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE" 14396fa9248bSJed Brown static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 1440ca45077fSPaul Mullowney { 1441ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1442aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1443aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1444c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*yarray; 1445b175d8bbSPaul Mullowney PetscErrorCode ierr; 1446aa372e3fSPaul Mullowney cusparseStatus_t stat; 1447ca45077fSPaul Mullowney 1448ca45077fSPaul Mullowney PetscFunctionBegin; 1449e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1450e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1451aa372e3fSPaul Mullowney if (!matstructT) { 1452bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1453aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1454bda325fcSPaul Mullowney } 1455c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1456c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1457aa372e3fSPaul Mullowney 1458aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1459aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1460aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1461aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, 1462b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1463aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1464c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstructT->beta, 1465c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 1466aa372e3fSPaul Mullowney } else { 14672692e278SPaul Mullowney #if CUDA_VERSION>=4020 1468aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1469aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1470b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, hybMat, 1471c41cb2e2SAlejandro Lamas Daviña xarray, matstructT->beta, 1472c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 14732692e278SPaul Mullowney #endif 1474ca45077fSPaul Mullowney } 1475c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1476c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1477aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 1478c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1479ca45077fSPaul Mullowney } 1480aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 1481ca45077fSPaul Mullowney PetscFunctionReturn(0); 1482ca45077fSPaul Mullowney } 1483ca45077fSPaul Mullowney 1484aa372e3fSPaul Mullowney 1485ca45077fSPaul Mullowney #undef __FUNCT__ 14869ae82921SPaul Mullowney #define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE" 14876fa9248bSJed Brown static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) 14889ae82921SPaul Mullowney { 14899ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1490aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1491aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1492c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> zptr; 1493c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*zarray; 1494b175d8bbSPaul Mullowney PetscErrorCode ierr; 1495aa372e3fSPaul Mullowney cusparseStatus_t stat; 14966e111a19SKarl Rupp 14979ae82921SPaul Mullowney PetscFunctionBegin; 1498e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1499e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 15009ae82921SPaul Mullowney try { 1501c41cb2e2SAlejandro Lamas Daviña ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); 1502c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1503c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 1504c41cb2e2SAlejandro Lamas Daviña zptr = thrust::device_pointer_cast(zarray); 15059ae82921SPaul Mullowney 1506e057df02SPaul Mullowney /* multiply add */ 1507aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1508aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1509b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1510b06137fdSPaul Mullowney number of compressed rows in the matrix ... which is equivalent to the 1511b06137fdSPaul Mullowney size of the workVector */ 1512aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1513a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1514b06137fdSPaul Mullowney mat->num_entries, matstruct->alpha, matstruct->descr, 1515aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1516c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstruct->beta, 1517c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1518aa372e3fSPaul Mullowney } else { 15192692e278SPaul Mullowney #if CUDA_VERSION>=4020 1520aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1521a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1522aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1523b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1524c41cb2e2SAlejandro Lamas Daviña xarray, matstruct->beta, 1525c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1526a65300a6SPaul Mullowney } 15272692e278SPaul Mullowney #endif 1528aa372e3fSPaul Mullowney } 1529aa372e3fSPaul Mullowney 1530aa372e3fSPaul Mullowney /* scatter the data from the temporary into the full vector with a += operation */ 1531c41cb2e2SAlejandro Lamas Daviña thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), 1532c41cb2e2SAlejandro Lamas Daviña thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1533c41cb2e2SAlejandro Lamas Daviña VecCUDAPlusEquals()); 1534c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1535c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 15369ae82921SPaul Mullowney 15379ae82921SPaul Mullowney } catch(char *ex) { 15389ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 15399ae82921SPaul Mullowney } 1540c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 15419ae82921SPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 15429ae82921SPaul Mullowney PetscFunctionReturn(0); 15439ae82921SPaul Mullowney } 15449ae82921SPaul Mullowney 15459ae82921SPaul Mullowney #undef __FUNCT__ 1546b175d8bbSPaul Mullowney #define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE" 15476fa9248bSJed Brown static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) 1548ca45077fSPaul Mullowney { 1549ca45077fSPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1550aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1551aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1552c41cb2e2SAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> zptr; 1553c41cb2e2SAlejandro Lamas Daviña PetscScalar *xarray,*zarray; 1554b175d8bbSPaul Mullowney PetscErrorCode ierr; 1555aa372e3fSPaul Mullowney cusparseStatus_t stat; 15566e111a19SKarl Rupp 1557ca45077fSPaul Mullowney PetscFunctionBegin; 1558e057df02SPaul Mullowney /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE 1559e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ 1560aa372e3fSPaul Mullowney if (!matstructT) { 1561bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); 1562aa372e3fSPaul Mullowney matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; 1563bda325fcSPaul Mullowney } 1564aa372e3fSPaul Mullowney 1565ca45077fSPaul Mullowney try { 1566c41cb2e2SAlejandro Lamas Daviña ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); 1567c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1568c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); 1569c41cb2e2SAlejandro Lamas Daviña zptr = thrust::device_pointer_cast(zarray); 1570ca45077fSPaul Mullowney 1571e057df02SPaul Mullowney /* multiply add with matrix transpose */ 1572aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1573aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstructT->mat; 1574b06137fdSPaul Mullowney /* here we need to be careful to set the number of rows in the multiply to the 1575b06137fdSPaul Mullowney number of compressed rows in the matrix ... which is equivalent to the 1576b06137fdSPaul Mullowney size of the workVector */ 1577aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1578a65300a6SPaul Mullowney mat->num_rows, mat->num_cols, 1579b06137fdSPaul Mullowney mat->num_entries, matstructT->alpha, matstructT->descr, 1580aa372e3fSPaul Mullowney mat->values->data().get(), mat->row_offsets->data().get(), 1581c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstructT->beta, 1582c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1583aa372e3fSPaul Mullowney } else { 15842692e278SPaul Mullowney #if CUDA_VERSION>=4020 1585aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; 1586a65300a6SPaul Mullowney if (cusparsestruct->workVector->size()) { 1587aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1588b06137fdSPaul Mullowney matstructT->alpha, matstructT->descr, hybMat, 1589c41cb2e2SAlejandro Lamas Daviña xarray, matstructT->beta, 1590c41cb2e2SAlejandro Lamas Daviña cusparsestruct->workVector->data().get());CHKERRCUDA(stat); 1591a65300a6SPaul Mullowney } 15922692e278SPaul Mullowney #endif 1593aa372e3fSPaul Mullowney } 1594aa372e3fSPaul Mullowney 1595aa372e3fSPaul Mullowney /* scatter the data from the temporary into the full vector with a += operation */ 1596c41cb2e2SAlejandro Lamas Daviña thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))), 1597c41cb2e2SAlejandro Lamas Daviña thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(), 1598c41cb2e2SAlejandro Lamas Daviña VecCUDAPlusEquals()); 1599ca45077fSPaul Mullowney 1600c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1601c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); 1602ca45077fSPaul Mullowney 1603ca45077fSPaul Mullowney } catch(char *ex) { 1604ca45077fSPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 1605ca45077fSPaul Mullowney } 1606c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1607ca45077fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); 1608ca45077fSPaul Mullowney PetscFunctionReturn(0); 1609ca45077fSPaul Mullowney } 1610ca45077fSPaul Mullowney 1611ca45077fSPaul Mullowney #undef __FUNCT__ 16129ae82921SPaul Mullowney #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" 16136fa9248bSJed Brown static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) 16149ae82921SPaul Mullowney { 16159ae82921SPaul Mullowney PetscErrorCode ierr; 16166e111a19SKarl Rupp 16179ae82921SPaul Mullowney PetscFunctionBegin; 16189ae82921SPaul Mullowney ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); 1619bc3f50f2SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 1620e057df02SPaul Mullowney ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); 1621bc3f50f2SPaul Mullowney } 16229ae82921SPaul Mullowney if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); 1623bbf3fe20SPaul Mullowney A->ops->mult = MatMult_SeqAIJCUSPARSE; 1624bbf3fe20SPaul Mullowney A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; 1625bbf3fe20SPaul Mullowney A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; 1626bbf3fe20SPaul Mullowney A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; 16279ae82921SPaul Mullowney PetscFunctionReturn(0); 16289ae82921SPaul Mullowney } 16299ae82921SPaul Mullowney 16309ae82921SPaul Mullowney /* --------------------------------------------------------------------------------*/ 16319ae82921SPaul Mullowney #undef __FUNCT__ 16329ae82921SPaul Mullowney #define __FUNCT__ "MatCreateSeqAIJCUSPARSE" 1633e057df02SPaul Mullowney /*@ 16349ae82921SPaul Mullowney MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format 1635e057df02SPaul Mullowney (the default parallel PETSc format). This matrix will ultimately pushed down 1636e057df02SPaul Mullowney to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix 1637e057df02SPaul Mullowney assembly performance the user should preallocate the matrix storage by setting 1638e057df02SPaul Mullowney the parameter nz (or the array nnz). By setting these parameters accurately, 1639e057df02SPaul Mullowney performance during matrix assembly can be increased by more than a factor of 50. 16409ae82921SPaul Mullowney 16419ae82921SPaul Mullowney Collective on MPI_Comm 16429ae82921SPaul Mullowney 16439ae82921SPaul Mullowney Input Parameters: 16449ae82921SPaul Mullowney + comm - MPI communicator, set to PETSC_COMM_SELF 16459ae82921SPaul Mullowney . m - number of rows 16469ae82921SPaul Mullowney . n - number of columns 16479ae82921SPaul Mullowney . nz - number of nonzeros per row (same for all rows) 16489ae82921SPaul Mullowney - nnz - array containing the number of nonzeros in the various rows 16490298fd71SBarry Smith (possibly different for each row) or NULL 16509ae82921SPaul Mullowney 16519ae82921SPaul Mullowney Output Parameter: 16529ae82921SPaul Mullowney . A - the matrix 16539ae82921SPaul Mullowney 16549ae82921SPaul Mullowney It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), 16559ae82921SPaul Mullowney MatXXXXSetPreallocation() paradgm instead of this routine directly. 16569ae82921SPaul Mullowney [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] 16579ae82921SPaul Mullowney 16589ae82921SPaul Mullowney Notes: 16599ae82921SPaul Mullowney If nnz is given then nz is ignored 16609ae82921SPaul Mullowney 16619ae82921SPaul Mullowney The AIJ format (also called the Yale sparse matrix format or 16629ae82921SPaul Mullowney compressed row storage), is fully compatible with standard Fortran 77 16639ae82921SPaul Mullowney storage. That is, the stored row and column indices can begin at 16649ae82921SPaul Mullowney either one (as in Fortran) or zero. See the users' manual for details. 16659ae82921SPaul Mullowney 16669ae82921SPaul Mullowney Specify the preallocated storage with either nz or nnz (not both). 16670298fd71SBarry Smith Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory 16689ae82921SPaul Mullowney allocation. For large problems you MUST preallocate memory or you 16699ae82921SPaul Mullowney will get TERRIBLE performance, see the users' manual chapter on matrices. 16709ae82921SPaul Mullowney 16719ae82921SPaul Mullowney By default, this format uses inodes (identical nodes) when possible, to 16729ae82921SPaul Mullowney improve numerical efficiency of matrix-vector products and solves. We 16739ae82921SPaul Mullowney search for consecutive rows with the same nonzero structure, thereby 16749ae82921SPaul Mullowney reusing matrix information to achieve increased efficiency. 16759ae82921SPaul Mullowney 16769ae82921SPaul Mullowney Level: intermediate 16779ae82921SPaul Mullowney 1678e057df02SPaul Mullowney .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE 16799ae82921SPaul Mullowney @*/ 16809ae82921SPaul Mullowney PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A) 16819ae82921SPaul Mullowney { 16829ae82921SPaul Mullowney PetscErrorCode ierr; 16839ae82921SPaul Mullowney 16849ae82921SPaul Mullowney PetscFunctionBegin; 16859ae82921SPaul Mullowney ierr = MatCreate(comm,A);CHKERRQ(ierr); 16869ae82921SPaul Mullowney ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); 16879ae82921SPaul Mullowney ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); 16889ae82921SPaul Mullowney ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr); 16899ae82921SPaul Mullowney PetscFunctionReturn(0); 16909ae82921SPaul Mullowney } 16919ae82921SPaul Mullowney 16929ae82921SPaul Mullowney #undef __FUNCT__ 16939ae82921SPaul Mullowney #define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE" 16946fa9248bSJed Brown static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) 16959ae82921SPaul Mullowney { 16969ae82921SPaul Mullowney PetscErrorCode ierr; 1697ab25e6cbSDominic Meiser 16989ae82921SPaul Mullowney PetscFunctionBegin; 16999ae82921SPaul Mullowney if (A->factortype==MAT_FACTOR_NONE) { 1700c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) { 1701ab25e6cbSDominic Meiser ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr); 17029ae82921SPaul Mullowney } 17039ae82921SPaul Mullowney } else { 1704ab25e6cbSDominic Meiser ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr); 1705aa372e3fSPaul Mullowney } 17069ae82921SPaul Mullowney ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); 17079ae82921SPaul Mullowney PetscFunctionReturn(0); 17089ae82921SPaul Mullowney } 17099ae82921SPaul Mullowney 17109ae82921SPaul Mullowney #undef __FUNCT__ 17119ae82921SPaul Mullowney #define __FUNCT__ "MatCreate_SeqAIJCUSPARSE" 17128cc058d9SJed Brown PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) 17139ae82921SPaul Mullowney { 17149ae82921SPaul Mullowney PetscErrorCode ierr; 1715aa372e3fSPaul Mullowney cusparseStatus_t stat; 1716aa372e3fSPaul Mullowney cusparseHandle_t handle=0; 17179ae82921SPaul Mullowney 17189ae82921SPaul Mullowney PetscFunctionBegin; 17199ae82921SPaul Mullowney ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr); 17209ae82921SPaul Mullowney if (B->factortype==MAT_FACTOR_NONE) { 1721e057df02SPaul Mullowney /* you cannot check the inode.use flag here since the matrix was just created. 1722e057df02SPaul Mullowney now build a GPU matrix data structure */ 17239ae82921SPaul Mullowney B->spptr = new Mat_SeqAIJCUSPARSE; 17249ae82921SPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->mat = 0; 1725aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->matTranspose = 0; 1726aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->workVector = 0; 1727e057df02SPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->format = MAT_CUSPARSE_CSR; 1728aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; 1729aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = 0; 1730c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreate(&handle);CHKERRCUDA(stat); 1731aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = handle; 1732aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; 17339ae82921SPaul Mullowney } else { 17349ae82921SPaul Mullowney /* NEXT, set the pointers to the triangular factors */ 1735debe9ee2SPaul Mullowney B->spptr = new Mat_SeqAIJCUSPARSETriFactors; 17369ae82921SPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtr = 0; 17379ae82921SPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtr = 0; 1738aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtrTranspose = 0; 1739aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtrTranspose = 0; 1740aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->rpermIndices = 0; 1741aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->cpermIndices = 0; 1742aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->workVector = 0; 1743aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = 0; 1744c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreate(&handle);CHKERRCUDA(stat); 1745aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = handle; 1746aa372e3fSPaul Mullowney ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->nnz = 0; 17479ae82921SPaul Mullowney } 1748aa372e3fSPaul Mullowney 17499ae82921SPaul Mullowney B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; 17509ae82921SPaul Mullowney B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; 17512a7a6963SBarry Smith B->ops->getvecs = MatCreateVecs_SeqAIJCUSPARSE; 17529ae82921SPaul Mullowney B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; 1753ca45077fSPaul Mullowney B->ops->mult = MatMult_SeqAIJCUSPARSE; 1754ca45077fSPaul Mullowney B->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; 1755ca45077fSPaul Mullowney B->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; 1756ca45077fSPaul Mullowney B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; 17572205254eSKarl Rupp 17589ae82921SPaul Mullowney ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); 17592205254eSKarl Rupp 1760c41cb2e2SAlejandro Lamas Daviña B->valid_GPU_matrix = PETSC_CUDA_UNALLOCATED; 17612205254eSKarl Rupp 1762bdf89e91SBarry Smith ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr); 17639ae82921SPaul Mullowney PetscFunctionReturn(0); 17649ae82921SPaul Mullowney } 17659ae82921SPaul Mullowney 1766e057df02SPaul Mullowney /*M 1767e057df02SPaul Mullowney MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. 1768e057df02SPaul Mullowney 1769e057df02SPaul Mullowney A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either 17702692e278SPaul Mullowney CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. 17712692e278SPaul Mullowney All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. 1772e057df02SPaul Mullowney 1773e057df02SPaul Mullowney Options Database Keys: 1774e057df02SPaul Mullowney + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions() 1775aa372e3fSPaul Mullowney . -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). 1776aa372e3fSPaul Mullowney . -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). 1777e057df02SPaul Mullowney 1778e057df02SPaul Mullowney Level: beginner 1779e057df02SPaul Mullowney 17808468deeeSKarl Rupp .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation 1781e057df02SPaul Mullowney M*/ 17827f756511SDominic Meiser 178342c9c57cSBarry Smith PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*); 178442c9c57cSBarry Smith 17850f39cd5aSBarry Smith 178642c9c57cSBarry Smith #undef __FUNCT__ 178742c9c57cSBarry Smith #define __FUNCT__ "MatSolverPackageRegister_CUSPARSE" 178829b38603SBarry Smith PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void) 178942c9c57cSBarry Smith { 179042c9c57cSBarry Smith PetscErrorCode ierr; 179142c9c57cSBarry Smith 179242c9c57cSBarry Smith PetscFunctionBegin; 179342c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 179442c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 179542c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 179642c9c57cSBarry Smith ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); 179742c9c57cSBarry Smith PetscFunctionReturn(0); 179842c9c57cSBarry Smith } 179929b38603SBarry Smith 180081e08676SBarry Smith 18017f756511SDominic Meiser #undef __FUNCT__ 18027f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy" 18037f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) 18047f756511SDominic Meiser { 18057f756511SDominic Meiser cusparseStatus_t stat; 18067f756511SDominic Meiser cusparseHandle_t handle; 18077f756511SDominic Meiser 18087f756511SDominic Meiser PetscFunctionBegin; 18097f756511SDominic Meiser if (*cusparsestruct) { 18107f756511SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format); 18117f756511SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format); 18127f756511SDominic Meiser delete (*cusparsestruct)->workVector; 18137f756511SDominic Meiser if (handle = (*cusparsestruct)->handle) { 1814c41cb2e2SAlejandro Lamas Daviña stat = cusparseDestroy(handle);CHKERRCUDA(stat); 18157f756511SDominic Meiser } 18167f756511SDominic Meiser delete *cusparsestruct; 18177f756511SDominic Meiser *cusparsestruct = 0; 18187f756511SDominic Meiser } 18197f756511SDominic Meiser PetscFunctionReturn(0); 18207f756511SDominic Meiser } 18217f756511SDominic Meiser 18227f756511SDominic Meiser #undef __FUNCT__ 18237f756511SDominic Meiser #define __FUNCT__ "CsrMatrix_Destroy" 18247f756511SDominic Meiser static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) 18257f756511SDominic Meiser { 18267f756511SDominic Meiser PetscFunctionBegin; 18277f756511SDominic Meiser if (*mat) { 18287f756511SDominic Meiser delete (*mat)->values; 18297f756511SDominic Meiser delete (*mat)->column_indices; 18307f756511SDominic Meiser delete (*mat)->row_offsets; 18317f756511SDominic Meiser delete *mat; 18327f756511SDominic Meiser *mat = 0; 18337f756511SDominic Meiser } 18347f756511SDominic Meiser PetscFunctionReturn(0); 18357f756511SDominic Meiser } 18367f756511SDominic Meiser 18377f756511SDominic Meiser #undef __FUNCT__ 18387f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy" 18397f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) 18407f756511SDominic Meiser { 18417f756511SDominic Meiser cusparseStatus_t stat; 18427f756511SDominic Meiser PetscErrorCode ierr; 18437f756511SDominic Meiser 18447f756511SDominic Meiser PetscFunctionBegin; 18457f756511SDominic Meiser if (*trifactor) { 1846c41cb2e2SAlejandro Lamas Daviña if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUDA(stat); } 1847c41cb2e2SAlejandro Lamas Daviña if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUDA(stat); } 18487f756511SDominic Meiser ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr); 18497f756511SDominic Meiser delete *trifactor; 18507f756511SDominic Meiser *trifactor = 0; 18517f756511SDominic Meiser } 18527f756511SDominic Meiser PetscFunctionReturn(0); 18537f756511SDominic Meiser } 18547f756511SDominic Meiser 18557f756511SDominic Meiser #undef __FUNCT__ 18567f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy" 18577f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format) 18587f756511SDominic Meiser { 18597f756511SDominic Meiser CsrMatrix *mat; 18607f756511SDominic Meiser cusparseStatus_t stat; 18617f756511SDominic Meiser cudaError_t err; 18627f756511SDominic Meiser 18637f756511SDominic Meiser PetscFunctionBegin; 18647f756511SDominic Meiser if (*matstruct) { 18657f756511SDominic Meiser if ((*matstruct)->mat) { 18667f756511SDominic Meiser if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) { 18677f756511SDominic Meiser cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; 1868c41cb2e2SAlejandro Lamas Daviña stat = cusparseDestroyHybMat(hybMat);CHKERRCUDA(stat); 18697f756511SDominic Meiser } else { 18707f756511SDominic Meiser mat = (CsrMatrix*)(*matstruct)->mat; 18717f756511SDominic Meiser CsrMatrix_Destroy(&mat); 18727f756511SDominic Meiser } 18737f756511SDominic Meiser } 1874c41cb2e2SAlejandro Lamas Daviña if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUDA(stat); } 18757f756511SDominic Meiser delete (*matstruct)->cprowIndices; 1876c41cb2e2SAlejandro Lamas Daviña if ((*matstruct)->alpha) { err=cudaFree((*matstruct)->alpha);CHKERRCUDA(err); } 1877c41cb2e2SAlejandro Lamas Daviña if ((*matstruct)->beta) { err=cudaFree((*matstruct)->beta);CHKERRCUDA(err); } 18787f756511SDominic Meiser delete *matstruct; 18797f756511SDominic Meiser *matstruct = 0; 18807f756511SDominic Meiser } 18817f756511SDominic Meiser PetscFunctionReturn(0); 18827f756511SDominic Meiser } 18837f756511SDominic Meiser 18847f756511SDominic Meiser #undef __FUNCT__ 18857f756511SDominic Meiser #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactors_Destroy" 18867f756511SDominic Meiser static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors) 18877f756511SDominic Meiser { 18887f756511SDominic Meiser cusparseHandle_t handle; 18897f756511SDominic Meiser cusparseStatus_t stat; 18907f756511SDominic Meiser 18917f756511SDominic Meiser PetscFunctionBegin; 18927f756511SDominic Meiser if (*trifactors) { 18937f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr); 18947f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr); 18957f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose); 18967f756511SDominic Meiser Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose); 18977f756511SDominic Meiser delete (*trifactors)->rpermIndices; 18987f756511SDominic Meiser delete (*trifactors)->cpermIndices; 18997f756511SDominic Meiser delete (*trifactors)->workVector; 19007f756511SDominic Meiser if (handle = (*trifactors)->handle) { 1901c41cb2e2SAlejandro Lamas Daviña stat = cusparseDestroy(handle);CHKERRCUDA(stat); 19027f756511SDominic Meiser } 19037f756511SDominic Meiser delete *trifactors; 19047f756511SDominic Meiser *trifactors = 0; 19057f756511SDominic Meiser } 19067f756511SDominic Meiser PetscFunctionReturn(0); 19077f756511SDominic Meiser } 19087f756511SDominic Meiser 1909