19ae82921SPaul Mullowney /* 29ae82921SPaul Mullowney Defines the basic matrix operations for the AIJ (compressed row) 3bc3f50f2SPaul Mullowney matrix storage format using the CUSPARSE library, 49ae82921SPaul Mullowney */ 5dced61a5SBarry Smith #define PETSC_SKIP_SPINLOCK 69ae82921SPaul Mullowney 73d13b8fdSMatthew G. Knepley #include <petscconf.h> 83d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 9087f3262SPaul Mullowney #include <../src/mat/impls/sbaij/seq/sbaij.h> 103d13b8fdSMatthew G. Knepley #include <../src/vec/vec/impls/dvecimpl.h> 11af0996ceSBarry Smith #include <petsc/private/vecimpl.h> 129ae82921SPaul Mullowney #undef VecType 133d13b8fdSMatthew G. Knepley #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> 14bc3f50f2SPaul Mullowney 15e057df02SPaul Mullowney const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; 169ae82921SPaul Mullowney 17087f3262SPaul Mullowney static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 18087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); 19087f3262SPaul Mullowney static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 20087f3262SPaul Mullowney 216fa9248bSJed Brown static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 226fa9248bSJed Brown static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); 236fa9248bSJed Brown static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); 24087f3262SPaul Mullowney 256fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); 266fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); 276fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); 
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec);
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat);
static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec);
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec);

/* internal destructors for the GPU-side data structures hung off Mat->spptr */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**);
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat);
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**);
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**);

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetStream"
/*
   MatCUSPARSESetStream - records the given CUDA stream in the matrix's
   Mat_SeqAIJCUSPARSE structure and attaches it to the cuSPARSE handle via
   cusparseSetStream(), so subsequent cuSPARSE calls on this matrix use it.
*/
PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  cusparsestruct->stream = stream;
  stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUDA(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetHandle"
/*
   MatCUSPARSESetHandle - installs a caller-supplied cuSPARSE handle on the
   matrix. Any previously installed handle is destroyed first, and the new
   handle is switched to device pointer mode.
*/
PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle)
{
  cusparseStatus_t   stat;
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  if (cusparsestruct->handle) {
    stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUDA(stat);
  }
  cusparsestruct->handle = handle;
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSEClearHandle"
/*
   MatCUSPARSEClearHandle - forgets the stored cuSPARSE handle WITHOUT
   destroying it (the caller retains ownership; contrast with
   MatCUSPARSESetHandle which destroys the old handle).
*/
PetscErrorCode MatCUSPARSEClearHandle(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  PetscFunctionBegin;
  if (cusparsestruct->handle)
    cusparsestruct->handle = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse"
/* Reports MATSOLVERCUSPARSE as the solver package backing this factored matrix. */
PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(0);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices
  on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported
  algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer
  performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the
  CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these
  algorithms are not recommended. This class does NOT support direct solver operations.

  Level: beginner

.seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

#undef __FUNCT__
#define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse"
/*
   Creates the (square, n x n) factor matrix B for A and wires the symbolic
   factorization function pointers for the requested factor type:
   LU/ILU/ILUDT -> MatILUFactorSymbolic_SeqAIJCUSPARSE / MatLUFactorSymbolic_SeqAIJCUSPARSE,
   Cholesky/ICC -> MatICCFactorSymbolic_SeqAIJCUSPARSE / MatCholeskyFactorSymbolic_SeqAIJCUSPARSE.
   Any other factor type is rejected with PETSC_ERR_SUP.
*/
PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B)
{
  PetscErrorCode ierr;
  PetscInt       n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
  (*B)->factortype = ftype;
  ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr);
  ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) {
    ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr);
    (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->lufactorsymbolic  = MatLUFactorSymbolic_SeqAIJCUSPARSE;
  } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) {
    (*B)->ops->iccfactorsymbolic      = MatICCFactorSymbolic_SeqAIJCUSPARSE;
    (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE;
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types");

  /* MAT_SKIP_ALLOCATION: the symbolic factorization allocates the storage */
  ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE"
/*
   Type-specific implementation behind MatCUSPARSESetFormat(): stores the
   requested storage format in the Mat_SeqAIJCUSPARSE structure for the
   MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL operations. ELL/HYB formats are
   rejected when compiled against CUDA older than 4.2.
*/
PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
#if CUDA_VERSION>=4020
  switch (op) {
  case MAT_CUSPARSE_MULT:
    cusparsestruct->format = format;
    break;
  case MAT_CUSPARSE_ALL:
    cusparsestruct->format = format;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op);
  }
#else
  if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later.");
#endif
  PetscFunctionReturn(0);
}

/*@
   MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular
   operation. Only the MatMult operation can use different GPU storage formats
   for MPIAIJCUSPARSE matrices.
   Not Collective

   Input Parameters:
+  A - Matrix of type SEQAIJCUSPARSE
.  op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL.
-  format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB.
The latter two require CUDA 4.2)

   Output Parameter:

   Level: intermediate

.seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
@*/
#undef __FUNCT__
#define __FUNCT__ "MatCUSPARSESetFormat"
PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID,1);
  /* dispatches to MatCUSPARSESetFormat_SeqAIJCUSPARSE (or the MPI variant); a
     no-op for matrix types that do not compose "MatCUSPARSESetFormat_C" */
  ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE"
/*
   Processes the runtime options
     -mat_cusparse_mult_storage_format  (SpMV format; only for unfactored matrices)
     -mat_cusparse_storage_format       (format for SpMV and triangular solve)
   and applies them via MatCUSPARSESetFormat().

   NOTE(review): both PetscOptionsHead() and PetscObjectOptionsBegin() are
   invoked here, which nests a second options scope inside the caller's;
   verify this pairing against the PetscOptionsBegin/End conventions.
*/
static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode           ierr;
  MatCUSPARSEStorageFormat format;
  PetscBool                flg;
  Mat_SeqAIJCUSPARSE       *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr);
  /* fix: the return value of PetscObjectOptionsBegin() was assigned but never
     checked; add the CHKERRQ used everywhere else in this file */
  ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
  if (A->factortype==MAT_FACTOR_NONE) {
    ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV",
                            "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
    if (flg) {
      ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr);
    }
  }
  ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve",
                          "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);

}

#undef __FUNCT__
#define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE"
/* Runs the CPU symbolic ILU, then redirects numeric factorization to the CUSPARSE version. */
static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE"
/* Runs the CPU symbolic LU, then redirects numeric factorization to the CUSPARSE version. */
static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr);
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE"
/* Runs the CPU symbolic ICC, then redirects numeric factorization to the CUSPARSE version. */
static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE"
/* Runs the CPU symbolic Cholesky, then redirects numeric factorization to the CUSPARSE version. */
static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr);
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix"
/*
   Extracts the lower-triangular factor L (unit diagonal, stored explicitly)
   from the host SeqAIJ factored matrix into pinned host buffers, uploads it
   into a CsrMatrix on the GPU, and runs the cuSPARSE solve analysis for it.
   Only rebuilds when the GPU copy is unallocated or stale (CPU-side newer).
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *ai = a->i,*aj = a->j,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiLo, *AjLo;
  PetscScalar                       *AALo;
  PetscInt                          i,nz, nzLower, offset, rowOffset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) {
    try {
      /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */
      nzLower=n+ai[n]-ai[1];

      /* Allocate Space for the lower triangular matrix (pinned host memory for the upload) */
      ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(ierr);

      /* Fill the lower triangular matrix; row 0 holds only the unit diagonal entry */
      AiLo[0]  = (PetscInt) 0;
      AiLo[n]  = nzLower;
      AjLo[0]  = (PetscInt) 0;
      AALo[0]  = (MatScalar) 1.0;
      v        = aa;
      vi       = aj;
      offset   = 1;
      rowOffset= 1;
      for (i=1; i<n; i++) {
        nz = ai[i+1] - ai[i];
        /* additional 1 for the term on the diagonal */
        AiLo[i]    = rowOffset;
        rowOffset += nz+1;

        /* copy the strictly-lower entries of row i, then append the unit diagonal */
        ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);

        offset      += nz;
        AjLo[offset] = (PetscInt) i;
        AALo[offset] = (MatScalar) 1.0;
        offset      += 1;

        v  += nz;
        vi += nz;
      }

      /* allocate space for the triangular factor information */
      loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: zero-based, triangular, lower fill, unit diagonal */
      stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation */
      loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix: copy the pinned host CSR arrays into device-side thrust arrays */
      loTriFactor->csrMat = new CsrMatrix;
      loTriFactor->csrMat->num_rows = n;
      loTriFactor->csrMat->num_cols = n;
      loTriFactor->csrMat->num_entries = nzLower;

      loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1);

      loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower);
      loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower);

      loTriFactor->csrMat->values = new THRUSTARRAY(nzLower);
      loTriFactor->csrMat->values->assign(AALo, AALo+nzLower);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                               loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                               loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                               loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

      /* the device copies own the data now; release the pinned staging buffers */
      ierr = cudaFreeHost(AiLo);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AjLo);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix"
/*
   Extracts the upper-triangular factor U from the host SeqAIJ factored matrix
   (diagonal stored inverted, per MatLUFactorNumeric_SeqAIJ conventions) into
   pinned host buffers, uploads it to the GPU, and runs the cuSPARSE solve
   analysis. Only rebuilds when the GPU copy is unallocated or stale.
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  PetscInt                          n = A->rmap->n;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  const PetscInt                    *aj = a->j,*adiag = a->diag,*vi;
  const MatScalar                   *aa = a->a,*v;
  PetscInt                          *AiUp, *AjUp;
  PetscScalar                       *AAUp;
  PetscInt                          i,nz, nzUpper, offset;
  PetscErrorCode                    ierr;

  PetscFunctionBegin;
  if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) {
    try {
      /* next, figure out the number of nonzeros in the upper triangular matrix. */
      nzUpper = adiag[0]-adiag[n];

      /* Allocate Space for the upper triangular matrix (pinned host memory) */
      ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr);

      /* Fill the upper triangular matrix, walking rows from last to first */
      AiUp[0]=(PetscInt) 0;
      AiUp[n]=nzUpper;
      offset = nzUpper;
      for (i=n-1; i>=0; i--) {
        v  = aa + adiag[i+1] + 1;
        vi = aj + adiag[i+1] + 1;

        /* number of elements NOT on the diagonal */
        nz = adiag[i] - adiag[i+1]-1;

        /* decrement the offset */
        offset -= (nz+1);

        /* first, set the diagonal elements: v[nz] holds 1/d_i, so invert back */
        AjUp[offset] = (PetscInt) i;
        AAUp[offset] = (MatScalar)1./v[nz];
        AiUp[i]      = AiUp[i+1] - (nz+1);

        ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr);
        ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);
      }

      /* allocate space for the triangular factor information */
      upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: zero-based, triangular, upper fill, non-unit diagonal */
      stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation */
      upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix: copy the pinned host CSR arrays into device-side thrust arrays */
      upTriFactor->csrMat = new CsrMatrix;
      upTriFactor->csrMat->num_rows = n;
      upTriFactor->csrMat->num_cols = n;
      upTriFactor->csrMat->num_entries = nzUpper;

      upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1);
      upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1);

      upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper);
      upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper);

      upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper);
      upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                               upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                               upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                               upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary?
*/
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

      /* the device copies own the data now; release the pinned staging buffers */
      ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU"
/*
   Builds both ILU triangular factors on the GPU, allocates the work vector
   used by the triangular solves, and caches the row/column permutations of
   the factorization (only when they are not the identity).
*/
static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode               ierr;
  Mat_SeqAIJ                   *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS                           isrow = a->row,iscol = a->icol;
  PetscBool                    row_identity,col_identity;
  const PetscInt               *r,*c;
  PetscInt                     n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr);
  ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr);

  /* scratch vector of length n shared by the triangular solves */
  cusparseTriFactors->workVector = new THRUSTARRAY;
  cusparseTriFactors->workVector->resize(n);
  cusparseTriFactors->nnz=a->nz;

  A->valid_GPU_matrix = PETSC_CUDA_BOTH;
  /* lower triangular indices: cache the row permutation unless it is the identity */
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  if (!row_identity) {
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(r, r+n);
  }
  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);

  /* upper triangular indices: cache the column permutation unless it is the identity */
  ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (!col_identity) {
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(c, c+n);
  }
  ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices"
/*
   Builds the ICC triangular factors on the GPU from a host matrix stored with
   SBAIJ-style data (A->data is cast to Mat_SeqSBAIJ below).
*/
static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A)
{
  Mat_SeqAIJ                        *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors      *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  cusparseStatus_t                  stat;
  PetscErrorCode                    ierr;
PetscInt *AiUp, *AjUp;             /* pinned host staging: row offsets / column indices of U */
  PetscScalar *AAUp;               /* pinned host staging: values of U */
  PetscScalar *AALo;               /* pinned host staging: values used for the L (transpose) solve */
  PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j;
  Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data;   /* NOTE(review): A->data is cast both as Mat_SeqAIJ (a) and Mat_SeqSBAIJ (b) here — presumably the factor is stored in SBAIJ layout; confirm */
  const PetscInt *ai = b->i,*aj = b->j,*vj;
  const MatScalar *aa = b->a,*v;

  PetscFunctionBegin;
  /* only (re)build when the GPU copy is missing or stale relative to the CPU */
  if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) {
    try {
      /* Allocate Space for the upper triangular matrix (pinned host memory for fast H2D copies) */
      ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr);
      ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr);

      /* Fill the upper triangular matrix */
      AiUp[0]=(PetscInt) 0;
      AiUp[n]=nzUpper;
      offset = 0;
      for (i=0; i<n; i++) {
        /* set the pointers into row i */
        v  = aa + ai[i];
        vj = aj + ai[i];
        nz = ai[i+1] - ai[i] - 1; /* exclude diag[i]; v[nz] is the diagonal entry */

        /* first, set the diagonal elements: U gets 1/d(i), and so does the L value array */
        AjUp[offset] = (PetscInt) i;
        AAUp[offset] = (MatScalar)1.0/v[nz];
        AiUp[i]      = offset;
        AALo[offset] = (MatScalar)1.0/v[nz];

        offset+=1;
        if (nz>0) {
          /* off-diagonals: negated for U; additionally divided by d(i) for L */
          ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr);
          ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr);
          for (j=offset; j<offset+nz; j++) {
            AAUp[j] = -AAUp[j];
            AALo[j] = AAUp[j]/v[nz];
          }
          offset+=nz;
        }
      }

      /* allocate space for the triangular factor information */
      upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description: zero-based, triangular, upper fill, unit diagonal */
      stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation */
      upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

      /* set the matrix: copy the staged CSR arrays into thrust device vectors */
      upTriFactor->csrMat = new CsrMatrix;
      upTriFactor->csrMat->num_rows = A->rmap->n;
      upTriFactor->csrMat->num_cols = A->cmap->n;
      upTriFactor->csrMat->num_entries = a->nz;

      upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
      upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

      upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
      upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

      upTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
      upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp,
                               upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr,
                               upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(),
                               upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor;

      /* allocate space for the triangular factor information */
      loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct;

      /* Create the matrix description.  Note the lower factor reuses the UPPER fill mode
         and is solved with a TRANSPOSE operation (set below), with a NON_UNIT diagonal. */
      stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat);
      stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat);
      stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat);
      stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat);
      stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat);

      /* Create the solve analysis information */
      stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* set the operation: the L solve is the transpose solve of the stored upper pattern */
      loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE;

      /* set the matrix: same pattern (AiUp/AjUp) as the upper factor, different values (AALo) */
      loTriFactor->csrMat = new CsrMatrix;
      loTriFactor->csrMat->num_rows = A->rmap->n;
      loTriFactor->csrMat->num_cols = A->cmap->n;
      loTriFactor->csrMat->num_entries = a->nz;

      loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
      loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1);

      loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz);
      loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz);

      loTriFactor->csrMat->values = new THRUSTARRAY(a->nz);
      loTriFactor->csrMat->values->assign(AALo, AALo+a->nz);

      /* perform the solve analysis */
      stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp,
                               loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr,
                               loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(),
                               loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat);

      /* assign the pointer. Is this really necessary? */
      ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor;

      A->valid_GPU_matrix = PETSC_CUDA_BOTH;
      /* release the pinned host staging buffers */
      ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr);
      ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr);
    } catch(char *ex) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU"
/*
   ICC analogue of MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU: builds the GPU
   factor structures, allocates the solve work vector, and caches the single
   symmetric permutation (used for both rows and columns) when it is not the
   identity.
*/
static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  IS ip = a->row;
  const PetscInt *rip;
  PetscBool perm_identity;
  PetscInt n = A->rmap->n;

  PetscFunctionBegin;
  ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr);
  /* device scratch vector of length n used between the two triangular solves */
  cusparseTriFactors->workVector = new THRUSTARRAY;
  cusparseTriFactors->workVector->resize(n);
  /* nnz of the full symmetric factorization: strictly-triangular entries counted twice plus the diagonal */
  cusparseTriFactors->nnz=(a->nz-n)*2 + n;

  /*lower triangular indices */
ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr);
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (!perm_identity) {
    /* symmetric factorization: the same permutation serves as both row and column permutation */
    cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->rpermIndices->assign(rip, rip+n);
    cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n);
    cusparseTriFactors->cpermIndices->assign(rip, rip+n);
  }
  ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE"
/*
   Numeric LU factorization for SEQAIJCUSPARSE: performs the factorization on
   the host with MatLUFactorNumeric_SeqAIJ, then installs the GPU solve
   routines and copies the triangular factors to the GPU.

   Input Parameters:
+  B    - the factor matrix
.  A    - the matrix being factored
-  info - factorization options
*/
static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
  IS isrow = b->row,iscol = b->col;
  PetscBool row_identity,col_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* the factorization itself happens on the CPU */
  ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);
  /* determine which version of MatSolve needs to be used: the NaturalOrdering
     variants skip the row/column permutation copies */
  ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr);
  ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr);
  if (row_identity && col_identity) {
    B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }

  /* get the triangular factors onto the GPU */
  ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE"
/*
   Numeric Cholesky factorization for SEQAIJCUSPARSE: host factorization via
   MatCholeskyFactorNumeric_SeqAIJ, then GPU solve setup, mirroring
   MatLUFactorNumeric_SeqAIJCUSPARSE above.
*/
static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data;
  IS ip = b->row;
  PetscBool perm_identity;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr);

  /* determine which version of MatSolve needs to be used.
*/
  ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr);
  if (perm_identity) {
    B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering;
  } else {
    B->ops->solve = MatSolve_SeqAIJCUSPARSE;
    B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE;
  }

  /* get the triangular factors onto the GPU */
  ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve"
/*
   Builds explicit transposes (CSC forms, stored as CSR) of both triangular
   factors and runs the cusparse solve analysis on each, so that
   MatSolveTranspose can use NON_TRANSPOSE solves.  Called lazily from the
   transpose-solve routines the first time they run.
*/
static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr;
  /* NOTE(review): the next two initializers are dead loads — both pointers are
     immediately overwritten by `new` below; callers guard against re-entry so no leak */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  cusparseStatus_t stat;
  cusparseIndexBase_t indexBase;
  cusparseMatrixType_t matrixType;
  cusparseFillMode_t fillMode;
  cusparseDiagType_t diagType;

  PetscFunctionBegin;

  /*********************************************/
  /* Now the Transpose of the Lower Tri Factor */
  /*********************************************/

  /* allocate space for the transpose of the lower triangular factor */
  loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct;

  /* set the matrix descriptors of the lower triangular factor:
     same type/base/diag as the original, but with the fill mode flipped */
  matrixType = cusparseGetMatType(loTriFactor->descr);
  indexBase = cusparseGetMatIndexBase(loTriFactor->descr);
  fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
    CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType = cusparseGetMatDiagType(loTriFactor->descr);

  /* Create the matrix description */
  stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUDA(stat);
  stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUDA(stat);
  stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUDA(stat);
  stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUDA(stat);

  /* Create the solve analysis information */
  stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* set the operation: the explicit transpose is solved without transposing */
  loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the lower triangular factor*/
  loTriFactorT->csrMat = new CsrMatrix;
  loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows;
  loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols;
  loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries;
  loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1);
  loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries);
  loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries);

  /* compute the transpose of the lower triangular factor, i.e. the CSC */
  stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows,
                          loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries,
                          loTriFactor->csrMat->values->data().get(),
                          loTriFactor->csrMat->row_offsets->data().get(),
                          loTriFactor->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->values->data().get(),
                          loTriFactorT->csrMat->column_indices->data().get(),
                          loTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

  /* perform the solve analysis on the transposed matrix */
  stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp,
                           loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries,
                           loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(),
                           loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(),
                           loTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* assign the pointer. Is this really necessary? */
  ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT;

  /*********************************************/
  /* Now the Transpose of the Upper Tri Factor */
  /*********************************************/

  /* allocate space for the transpose of the upper triangular factor */
  upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct;

  /* set the matrix descriptors of the upper triangular factor (fill mode flipped, as above) */
  matrixType = cusparseGetMatType(upTriFactor->descr);
  indexBase = cusparseGetMatIndexBase(upTriFactor->descr);
  fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ?
    CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER;
  diagType = cusparseGetMatDiagType(upTriFactor->descr);

  /* Create the matrix description */
  stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUDA(stat);
  stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUDA(stat);
  stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUDA(stat);
  stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUDA(stat);

  /* Create the solve analysis information */
  stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* set the operation */
  upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE;

  /* allocate GPU space for the CSC of the upper triangular factor*/
  upTriFactorT->csrMat = new CsrMatrix;
  upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows;
  upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols;
  upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries;
  upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1);
  upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries);
  upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries);

  /* compute the transpose of the upper triangular factor, i.e. the CSC */
  stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows,
                          upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries,
                          upTriFactor->csrMat->values->data().get(),
                          upTriFactor->csrMat->row_offsets->data().get(),
                          upTriFactor->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->values->data().get(),
                          upTriFactorT->csrMat->column_indices->data().get(),
                          upTriFactorT->csrMat->row_offsets->data().get(),
                          CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

  /* perform the solve analysis on the transposed matrix */
  stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp,
                           upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries,
                           upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(),
                           upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(),
                           upTriFactorT->solveInfo);CHKERRCUDA(stat);

  /* assign the pointer. Is this really necessary? */
  ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult"
/*
   Builds an explicit GPU transpose of A (in the configured storage format)
   for use by MatMultTranspose, together with its descriptor and the device
   copies of the alpha/beta scalars.
*/
static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A)
{
  Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data;
  cusparseStatus_t stat;
  cusparseIndexBase_t indexBase;
  cudaError_t err;

  PetscFunctionBegin;

  /* allocate space for the triangular factor information */
  matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
  stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUDA(stat);
  indexBase = cusparseGetMatIndexBase(matstruct->descr);
  stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUDA(stat);
  stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat);

  /* set alpha and beta: device-resident scalars, since pointer mode is set to DEVICE below */
  err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUDA(err);
  err =
cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
  err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUDA(err);
  err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err);
  stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat);

  if (cusparsestruct->format==MAT_CUSPARSE_CSR) {
    /* CSR: a single csr2csc produces the transpose directly */
    CsrMatrix *matrix = (CsrMatrix*)matstruct->mat;
    CsrMatrix *matrixT= new CsrMatrix;
    matrixT->num_rows = A->rmap->n;
    matrixT->num_cols = A->cmap->n;
    matrixT->num_entries = a->nz;
    matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
    matrixT->column_indices = new THRUSTINTARRAY32(a->nz);
    matrixT->values = new THRUSTARRAY(a->nz);

    /* compute the transpose of the matrix, i.e. the CSC */
    indexBase = cusparseGetMatIndexBase(matstruct->descr);
    stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows,
                            matrix->num_cols, matrix->num_entries,
                            matrix->values->data().get(),
                            matrix->row_offsets->data().get(),
                            matrix->column_indices->data().get(),
                            matrixT->values->data().get(),
                            matrixT->column_indices->data().get(),
                            matrixT->row_offsets->data().get(),
                            CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

    /* assign the pointer */
    matstructT->mat = matrixT;

  } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) {
#if CUDA_VERSION>=5000
    /* ELL/HYB has no direct transpose: round-trip HYB -> CSR -> CSC -> HYB */
    /* First convert HYB to CSR */
    CsrMatrix *temp= new CsrMatrix;
    temp->num_rows = A->rmap->n;
    temp->num_cols = A->cmap->n;
    temp->num_entries = a->nz;
    temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
    temp->column_indices = new THRUSTINTARRAY32(a->nz);
    temp->values = new THRUSTARRAY(a->nz);

    stat = cusparse_hyb2csr(cusparsestruct->handle,
                            matstruct->descr, (cusparseHybMat_t)matstruct->mat,
                            temp->values->data().get(),
                            temp->row_offsets->data().get(),
                            temp->column_indices->data().get());CHKERRCUDA(stat);

    /* Next, convert CSR to CSC (i.e. the matrix transpose) */
    CsrMatrix *tempT= new CsrMatrix;
    tempT->num_rows = A->rmap->n;
    tempT->num_cols = A->cmap->n;
    tempT->num_entries = a->nz;
    tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1);
    tempT->column_indices = new THRUSTINTARRAY32(a->nz);
    tempT->values = new THRUSTARRAY(a->nz);

    stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows,
                            temp->num_cols, temp->num_entries,
                            temp->values->data().get(),
                            temp->row_offsets->data().get(),
                            temp->column_indices->data().get(),
                            tempT->values->data().get(),
                            tempT->column_indices->data().get(),
                            tempT->row_offsets->data().get(),
                            CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat);

    /* Last, convert CSC to HYB */
    cusparseHybMat_t hybMat;
    stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat);
    cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ?
      CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO;
    stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n,
                            matstructT->descr, tempT->values->data().get(),
                            tempT->row_offsets->data().get(),
                            tempT->column_indices->data().get(),
                            hybMat, 0, partition);CHKERRCUDA(stat);

    /* assign the pointer */
    matstructT->mat = hybMat;

    /* delete temporaries */
    if (tempT) {
      if (tempT->values) delete (THRUSTARRAY*) tempT->values;
      if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices;
      if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets;
      delete (CsrMatrix*) tempT;
    }
    if (temp) {
      if (temp->values) delete (THRUSTARRAY*) temp->values;
      if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices;
      if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets;
      delete (CsrMatrix*) temp;
    }
#else
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later.");
#endif
  }
  /* assign the compressed row indices */
  matstructT->cprowIndices = new THRUSTINTARRAY;

  /* assign the pointer */
  ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE"
/*
   Solves A^T x = b on the GPU using explicitly transposed triangular
   factors (built lazily by MatSeqAIJCUSPARSEAnalyzeTransposeForSolve),
   applying the cached row/column permutations around the U^T and L^T solves.
*/
static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx)
{
  PetscInt n = xx->map->n;
  const PetscScalar *barray;
  PetscScalar *xarray;
  thrust::device_ptr<const PetscScalar> bGPU;
  thrust::device_ptr<PetscScalar> xGPU;
  cusparseStatus_t stat;
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr;
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose;
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose;
  THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Analyze the matrix and create the transpose ...
on the fly */ 1017aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1018bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1019aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1020aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1021bda325fcSPaul Mullowney } 1022bda325fcSPaul Mullowney 1023bda325fcSPaul Mullowney /* Get the GPU pointers */ 1024c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1025c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1026c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1027c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 1028bda325fcSPaul Mullowney 1029aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1030c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1031c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), 1032c41cb2e2SAlejandro Lamas Daviña xGPU); 1033aa372e3fSPaul Mullowney 1034aa372e3fSPaul Mullowney /* First, solve U */ 1035aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1036aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1037aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1038aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1039aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1040aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1041c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 1042aa372e3fSPaul Mullowney 1043aa372e3fSPaul Mullowney /* Then, solve L */ 
1044aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1045aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1046aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1047aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1048aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1049aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1050c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1051aa372e3fSPaul Mullowney 1052aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1053c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1054c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), 1055aa372e3fSPaul Mullowney tempGPU->begin()); 1056aa372e3fSPaul Mullowney 1057aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. 
*/ 1058c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 1059bda325fcSPaul Mullowney 1060bda325fcSPaul Mullowney /* restore */ 1061c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1062c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1063c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1064087f3262SPaul Mullowney 1065aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1066bda325fcSPaul Mullowney PetscFunctionReturn(0); 1067bda325fcSPaul Mullowney } 1068bda325fcSPaul Mullowney 1069bda325fcSPaul Mullowney #undef __FUNCT__ 1070bda325fcSPaul Mullowney #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" 10716fa9248bSJed Brown static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 1072bda325fcSPaul Mullowney { 1073465f34aeSAlejandro Lamas Daviña const PetscScalar *barray; 1074465f34aeSAlejandro Lamas Daviña PetscScalar *xarray; 1075bda325fcSPaul Mullowney cusparseStatus_t stat; 1076bda325fcSPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1077aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1078aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1079aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1080b175d8bbSPaul Mullowney PetscErrorCode ierr; 1081bda325fcSPaul Mullowney 1082bda325fcSPaul Mullowney PetscFunctionBegin; 1083aa372e3fSPaul Mullowney /* Analyze the matrix and create the transpose ... 
on the fly */ 1084aa372e3fSPaul Mullowney if (!loTriFactorT && !upTriFactorT) { 1085bda325fcSPaul Mullowney ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); 1086aa372e3fSPaul Mullowney loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; 1087aa372e3fSPaul Mullowney upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; 1088bda325fcSPaul Mullowney } 1089bda325fcSPaul Mullowney 1090bda325fcSPaul Mullowney /* Get the GPU pointers */ 1091c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1092c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1093bda325fcSPaul Mullowney 1094aa372e3fSPaul Mullowney /* First, solve U */ 1095aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, 1096aa372e3fSPaul Mullowney upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, 1097aa372e3fSPaul Mullowney upTriFactorT->csrMat->values->data().get(), 1098aa372e3fSPaul Mullowney upTriFactorT->csrMat->row_offsets->data().get(), 1099aa372e3fSPaul Mullowney upTriFactorT->csrMat->column_indices->data().get(), 1100aa372e3fSPaul Mullowney upTriFactorT->solveInfo, 1101c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1102aa372e3fSPaul Mullowney 1103aa372e3fSPaul Mullowney /* Then, solve L */ 1104aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, 1105aa372e3fSPaul Mullowney loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, 1106aa372e3fSPaul Mullowney loTriFactorT->csrMat->values->data().get(), 1107aa372e3fSPaul Mullowney loTriFactorT->csrMat->row_offsets->data().get(), 1108aa372e3fSPaul Mullowney loTriFactorT->csrMat->column_indices->data().get(), 1109aa372e3fSPaul Mullowney loTriFactorT->solveInfo, 1110c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 
1111bda325fcSPaul Mullowney 1112bda325fcSPaul Mullowney /* restore */ 1113c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1114c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1115c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1116aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 1117bda325fcSPaul Mullowney PetscFunctionReturn(0); 1118bda325fcSPaul Mullowney } 1119bda325fcSPaul Mullowney 11209ae82921SPaul Mullowney #undef __FUNCT__ 11219ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" 11226fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) 11239ae82921SPaul Mullowney { 1124465f34aeSAlejandro Lamas Daviña const PetscScalar *barray; 1125465f34aeSAlejandro Lamas Daviña PetscScalar *xarray; 1126465f34aeSAlejandro Lamas Daviña thrust::device_ptr<const PetscScalar> bGPU; 1127465f34aeSAlejandro Lamas Daviña thrust::device_ptr<PetscScalar> xGPU; 11289ae82921SPaul Mullowney cusparseStatus_t stat; 11299ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1130aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1131aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1132aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1133b175d8bbSPaul Mullowney PetscErrorCode ierr; 1134ebc8f436SDominic Meiser VecType t; 1135ebc8f436SDominic Meiser PetscBool flg; 11369ae82921SPaul Mullowney 11379ae82921SPaul Mullowney PetscFunctionBegin; 1138ebc8f436SDominic Meiser ierr = VecGetType(bb,&t);CHKERRQ(ierr); 1139c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 
1140c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUDA); 1141ebc8f436SDominic Meiser ierr = VecGetType(xx,&t);CHKERRQ(ierr); 1142c41cb2e2SAlejandro Lamas Daviña ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); 1143c41cb2e2SAlejandro Lamas Daviña if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). Can only deal with %s\n.",t,VECSEQCUDA); 1144ebc8f436SDominic Meiser 1145e057df02SPaul Mullowney /* Get the GPU pointers */ 1146c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1147c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 1148c41cb2e2SAlejandro Lamas Daviña xGPU = thrust::device_pointer_cast(xarray); 1149c41cb2e2SAlejandro Lamas Daviña bGPU = thrust::device_pointer_cast(barray); 11509ae82921SPaul Mullowney 1151aa372e3fSPaul Mullowney /* First, reorder with the row permutation */ 1152c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), 1153c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), 1154c41cb2e2SAlejandro Lamas Daviña xGPU); 1155aa372e3fSPaul Mullowney 1156aa372e3fSPaul Mullowney /* Next, solve L */ 1157aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1158aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1159aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1160aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1161aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1162aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1163c41cb2e2SAlejandro Lamas Daviña xarray, tempGPU->data().get());CHKERRCUDA(stat); 
1164aa372e3fSPaul Mullowney 1165aa372e3fSPaul Mullowney /* Then, solve U */ 1166aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1167aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1168aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1169aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1170aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1171aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1172c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 1173aa372e3fSPaul Mullowney 1174aa372e3fSPaul Mullowney /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ 1175c41cb2e2SAlejandro Lamas Daviña thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), 1176c41cb2e2SAlejandro Lamas Daviña thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->end()), 1177aa372e3fSPaul Mullowney tempGPU->begin()); 1178aa372e3fSPaul Mullowney 1179aa372e3fSPaul Mullowney /* Copy the temporary to the full solution. 
*/ 1180c41cb2e2SAlejandro Lamas Daviña thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); 11819ae82921SPaul Mullowney 1182c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1183c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1184c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1185aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 11869ae82921SPaul Mullowney PetscFunctionReturn(0); 11879ae82921SPaul Mullowney } 11889ae82921SPaul Mullowney 11899ae82921SPaul Mullowney #undef __FUNCT__ 11909ae82921SPaul Mullowney #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" 11916fa9248bSJed Brown static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) 11929ae82921SPaul Mullowney { 1193465f34aeSAlejandro Lamas Daviña const PetscScalar *barray; 1194465f34aeSAlejandro Lamas Daviña PetscScalar *xarray; 11959ae82921SPaul Mullowney cusparseStatus_t stat; 11969ae82921SPaul Mullowney Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; 1197aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; 1198aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; 1199aa372e3fSPaul Mullowney THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; 1200b175d8bbSPaul Mullowney PetscErrorCode ierr; 12019ae82921SPaul Mullowney 12029ae82921SPaul Mullowney PetscFunctionBegin; 1203e057df02SPaul Mullowney /* Get the GPU pointers */ 1204c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); 1205c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); 12069ae82921SPaul Mullowney 1207aa372e3fSPaul Mullowney /* First, solve L */ 
1208aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, 1209aa372e3fSPaul Mullowney loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, 1210aa372e3fSPaul Mullowney loTriFactor->csrMat->values->data().get(), 1211aa372e3fSPaul Mullowney loTriFactor->csrMat->row_offsets->data().get(), 1212aa372e3fSPaul Mullowney loTriFactor->csrMat->column_indices->data().get(), 1213aa372e3fSPaul Mullowney loTriFactor->solveInfo, 1214c41cb2e2SAlejandro Lamas Daviña barray, tempGPU->data().get());CHKERRCUDA(stat); 1215aa372e3fSPaul Mullowney 1216aa372e3fSPaul Mullowney /* Next, solve U */ 1217aa372e3fSPaul Mullowney stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, 1218aa372e3fSPaul Mullowney upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, 1219aa372e3fSPaul Mullowney upTriFactor->csrMat->values->data().get(), 1220aa372e3fSPaul Mullowney upTriFactor->csrMat->row_offsets->data().get(), 1221aa372e3fSPaul Mullowney upTriFactor->csrMat->column_indices->data().get(), 1222aa372e3fSPaul Mullowney upTriFactor->solveInfo, 1223c41cb2e2SAlejandro Lamas Daviña tempGPU->data().get(), xarray);CHKERRCUDA(stat); 12249ae82921SPaul Mullowney 1225c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); 1226c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); 1227c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1228aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); 12299ae82921SPaul Mullowney PetscFunctionReturn(0); 12309ae82921SPaul Mullowney } 12319ae82921SPaul Mullowney 12329ae82921SPaul Mullowney #undef __FUNCT__ 1233e057df02SPaul Mullowney #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" 12346fa9248bSJed Brown static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) 12359ae82921SPaul Mullowney { 12369ae82921SPaul Mullowney 1237aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE 
*cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1238aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 12399ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 12409ae82921SPaul Mullowney PetscInt m = A->rmap->n,*ii,*ridx; 12419ae82921SPaul Mullowney PetscErrorCode ierr; 1242aa372e3fSPaul Mullowney cusparseStatus_t stat; 1243b06137fdSPaul Mullowney cudaError_t err; 12449ae82921SPaul Mullowney 12459ae82921SPaul Mullowney PetscFunctionBegin; 1246c41cb2e2SAlejandro Lamas Daviña if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { 12479ae82921SPaul Mullowney ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 1248*34d6c7a5SJose E. Roman if (A->assembled && A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { 1249*34d6c7a5SJose E. Roman CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; 1250*34d6c7a5SJose E. Roman /* copy values only */ 1251*34d6c7a5SJose E. Roman matrix->values->assign(a->a, a->a+a->nz); 1252*34d6c7a5SJose E. 
Roman } else { 1253ce814652SDominic Meiser Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); 12549ae82921SPaul Mullowney try { 1255aa372e3fSPaul Mullowney cusparsestruct->nonzerorow=0; 1256aa372e3fSPaul Mullowney for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); 12579ae82921SPaul Mullowney 12589ae82921SPaul Mullowney if (a->compressedrow.use) { 12599ae82921SPaul Mullowney m = a->compressedrow.nrows; 12609ae82921SPaul Mullowney ii = a->compressedrow.i; 12619ae82921SPaul Mullowney ridx = a->compressedrow.rindex; 12629ae82921SPaul Mullowney } else { 1263b06137fdSPaul Mullowney /* Forcing compressed row on the GPU */ 12649ae82921SPaul Mullowney int k=0; 1265854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); 1266854ce69bSBarry Smith ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); 12679ae82921SPaul Mullowney ii[0]=0; 12689ae82921SPaul Mullowney for (int j = 0; j<m; j++) { 12699ae82921SPaul Mullowney if ((a->i[j+1]-a->i[j])>0) { 12709ae82921SPaul Mullowney ii[k] = a->i[j]; 12719ae82921SPaul Mullowney ridx[k]= j; 12729ae82921SPaul Mullowney k++; 12739ae82921SPaul Mullowney } 12749ae82921SPaul Mullowney } 1275aa372e3fSPaul Mullowney ii[cusparsestruct->nonzerorow] = a->nz; 1276aa372e3fSPaul Mullowney m = cusparsestruct->nonzerorow; 12779ae82921SPaul Mullowney } 12789ae82921SPaul Mullowney 1279aa372e3fSPaul Mullowney /* allocate space for the triangular factor information */ 1280aa372e3fSPaul Mullowney matstruct = new Mat_SeqAIJCUSPARSEMultStruct; 1281c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUDA(stat); 1282c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); 1283c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); 12849ae82921SPaul Mullowney 1285c41cb2e2SAlejandro Lamas 
Daviña err = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUDA(err); 1286c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1287c41cb2e2SAlejandro Lamas Daviña err = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUDA(err); 1288c41cb2e2SAlejandro Lamas Daviña err = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); 1289c41cb2e2SAlejandro Lamas Daviña stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); 1290b06137fdSPaul Mullowney 1291aa372e3fSPaul Mullowney /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ 1292aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1293aa372e3fSPaul Mullowney /* set the matrix */ 1294aa372e3fSPaul Mullowney CsrMatrix *matrix= new CsrMatrix; 1295a65300a6SPaul Mullowney matrix->num_rows = m; 1296aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1297aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1298a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1299a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 13009ae82921SPaul Mullowney 1301aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1302aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1303aa372e3fSPaul Mullowney 1304aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1305aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1306aa372e3fSPaul Mullowney 1307aa372e3fSPaul Mullowney /* assign the pointer */ 1308aa372e3fSPaul Mullowney matstruct->mat = matrix; 1309aa372e3fSPaul Mullowney 1310aa372e3fSPaul Mullowney } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { 13112692e278SPaul Mullowney #if CUDA_VERSION>=4020 1312aa372e3fSPaul Mullowney CsrMatrix *matrix= 
new CsrMatrix; 1313a65300a6SPaul Mullowney matrix->num_rows = m; 1314aa372e3fSPaul Mullowney matrix->num_cols = A->cmap->n; 1315aa372e3fSPaul Mullowney matrix->num_entries = a->nz; 1316a65300a6SPaul Mullowney matrix->row_offsets = new THRUSTINTARRAY32(m+1); 1317a65300a6SPaul Mullowney matrix->row_offsets->assign(ii, ii + m+1); 1318aa372e3fSPaul Mullowney 1319aa372e3fSPaul Mullowney matrix->column_indices = new THRUSTINTARRAY32(a->nz); 1320aa372e3fSPaul Mullowney matrix->column_indices->assign(a->j, a->j+a->nz); 1321aa372e3fSPaul Mullowney 1322aa372e3fSPaul Mullowney matrix->values = new THRUSTARRAY(a->nz); 1323aa372e3fSPaul Mullowney matrix->values->assign(a->a, a->a+a->nz); 1324aa372e3fSPaul Mullowney 1325aa372e3fSPaul Mullowney cusparseHybMat_t hybMat; 1326c41cb2e2SAlejandro Lamas Daviña stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); 1327aa372e3fSPaul Mullowney cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 1328aa372e3fSPaul Mullowney CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; 1329a65300a6SPaul Mullowney stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, 1330aa372e3fSPaul Mullowney matstruct->descr, matrix->values->data().get(), 1331aa372e3fSPaul Mullowney matrix->row_offsets->data().get(), 1332aa372e3fSPaul Mullowney matrix->column_indices->data().get(), 1333c41cb2e2SAlejandro Lamas Daviña hybMat, 0, partition);CHKERRCUDA(stat); 1334aa372e3fSPaul Mullowney /* assign the pointer */ 1335aa372e3fSPaul Mullowney matstruct->mat = hybMat; 1336aa372e3fSPaul Mullowney 1337aa372e3fSPaul Mullowney if (matrix) { 1338aa372e3fSPaul Mullowney if (matrix->values) delete (THRUSTARRAY*)matrix->values; 1339aa372e3fSPaul Mullowney if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; 1340aa372e3fSPaul Mullowney if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; 1341aa372e3fSPaul Mullowney delete (CsrMatrix*)matrix; 1342087f3262SPaul Mullowney } 
13432692e278SPaul Mullowney #endif 1344087f3262SPaul Mullowney } 1345ca45077fSPaul Mullowney 1346aa372e3fSPaul Mullowney /* assign the compressed row indices */ 1347aa372e3fSPaul Mullowney matstruct->cprowIndices = new THRUSTINTARRAY(m); 1348aa372e3fSPaul Mullowney matstruct->cprowIndices->assign(ridx,ridx+m); 1349aa372e3fSPaul Mullowney 1350aa372e3fSPaul Mullowney /* assign the pointer */ 1351aa372e3fSPaul Mullowney cusparsestruct->mat = matstruct; 1352aa372e3fSPaul Mullowney 13539ae82921SPaul Mullowney if (!a->compressedrow.use) { 13549ae82921SPaul Mullowney ierr = PetscFree(ii);CHKERRQ(ierr); 13559ae82921SPaul Mullowney ierr = PetscFree(ridx);CHKERRQ(ierr); 13569ae82921SPaul Mullowney } 1357aa372e3fSPaul Mullowney cusparsestruct->workVector = new THRUSTARRAY; 1358aa372e3fSPaul Mullowney cusparsestruct->workVector->resize(m); 13599ae82921SPaul Mullowney } catch(char *ex) { 13609ae82921SPaul Mullowney SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); 13619ae82921SPaul Mullowney } 1362c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 13632205254eSKarl Rupp 1364c41cb2e2SAlejandro Lamas Daviña A->valid_GPU_matrix = PETSC_CUDA_BOTH; 1365*34d6c7a5SJose E. Roman cusparsestruct->nonzerostate = A->nonzerostate; 1366*34d6c7a5SJose E. 
Roman } 13679ae82921SPaul Mullowney ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); 13689ae82921SPaul Mullowney } 13699ae82921SPaul Mullowney PetscFunctionReturn(0); 13709ae82921SPaul Mullowney } 13719ae82921SPaul Mullowney 13729ae82921SPaul Mullowney #undef __FUNCT__ 13732a7a6963SBarry Smith #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" 13742a7a6963SBarry Smith static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) 13759ae82921SPaul Mullowney { 13769ae82921SPaul Mullowney PetscErrorCode ierr; 137733d57670SJed Brown PetscInt rbs,cbs; 13789ae82921SPaul Mullowney 13799ae82921SPaul Mullowney PetscFunctionBegin; 138033d57670SJed Brown ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); 13819ae82921SPaul Mullowney if (right) { 1382ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); 13839ae82921SPaul Mullowney ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 138433d57670SJed Brown ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); 1385c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*right,VECSEQCUDA);CHKERRQ(ierr); 13869ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); 13879ae82921SPaul Mullowney } 13889ae82921SPaul Mullowney if (left) { 1389ce94432eSBarry Smith ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); 13909ae82921SPaul Mullowney ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); 139133d57670SJed Brown ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); 1392c41cb2e2SAlejandro Lamas Daviña ierr = VecSetType(*left,VECSEQCUDA);CHKERRQ(ierr); 13939ae82921SPaul Mullowney ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); 13949ae82921SPaul Mullowney } 13959ae82921SPaul Mullowney PetscFunctionReturn(0); 13969ae82921SPaul Mullowney } 13979ae82921SPaul Mullowney 1398c41cb2e2SAlejandro Lamas Daviña struct VecCUDAPlusEquals 1399aa372e3fSPaul Mullowney { 
1400aa372e3fSPaul Mullowney template <typename Tuple> 1401aa372e3fSPaul Mullowney __host__ __device__ 1402aa372e3fSPaul Mullowney void operator()(Tuple t) 1403aa372e3fSPaul Mullowney { 1404aa372e3fSPaul Mullowney thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); 1405aa372e3fSPaul Mullowney } 1406aa372e3fSPaul Mullowney }; 1407aa372e3fSPaul Mullowney 14089ae82921SPaul Mullowney #undef __FUNCT__ 14099ae82921SPaul Mullowney #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" 14106fa9248bSJed Brown static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) 14119ae82921SPaul Mullowney { 14129ae82921SPaul Mullowney Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; 1413aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; 1414aa372e3fSPaul Mullowney Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; 1415465f34aeSAlejandro Lamas Daviña const PetscScalar *xarray; 1416465f34aeSAlejandro Lamas Daviña PetscScalar *yarray; 1417b175d8bbSPaul Mullowney PetscErrorCode ierr; 1418aa372e3fSPaul Mullowney cusparseStatus_t stat; 14199ae82921SPaul Mullowney 14209ae82921SPaul Mullowney PetscFunctionBegin; 1421*34d6c7a5SJose E. Roman /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */ 1422*34d6c7a5SJose E. 
Roman ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); 1423c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); 1424c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); 1425aa372e3fSPaul Mullowney if (cusparsestruct->format==MAT_CUSPARSE_CSR) { 1426aa372e3fSPaul Mullowney CsrMatrix *mat = (CsrMatrix*)matstruct->mat; 1427aa372e3fSPaul Mullowney stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1428aa372e3fSPaul Mullowney mat->num_rows, mat->num_cols, mat->num_entries, 1429b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), 1430c41cb2e2SAlejandro Lamas Daviña mat->column_indices->data().get(), xarray, matstruct->beta, 1431c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 1432aa372e3fSPaul Mullowney } else { 14332692e278SPaul Mullowney #if CUDA_VERSION>=4020 1434aa372e3fSPaul Mullowney cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; 1435aa372e3fSPaul Mullowney stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, 1436b06137fdSPaul Mullowney matstruct->alpha, matstruct->descr, hybMat, 1437c41cb2e2SAlejandro Lamas Daviña xarray, matstruct->beta, 1438c41cb2e2SAlejandro Lamas Daviña yarray);CHKERRCUDA(stat); 14392692e278SPaul Mullowney #endif 14409ae82921SPaul Mullowney } 1441c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); 1442c41cb2e2SAlejandro Lamas Daviña ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); 1443aa372e3fSPaul Mullowney if (!cusparsestruct->stream) { 1444c41cb2e2SAlejandro Lamas Daviña ierr = WaitForGPU();CHKERRCUDA(ierr); 1445ca45077fSPaul Mullowney } 1446aa372e3fSPaul Mullowney ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); 14479ae82921SPaul Mullowney PetscFunctionReturn(0); 14489ae82921SPaul Mullowney } 14499ae82921SPaul Mullowney 14509ae82921SPaul Mullowney 
#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE"
/* y = A^T*x: applied as a NON_TRANSPOSE SpMV on a stored explicit transpose,
   which is built lazily on first use. */
static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  Mat_SeqAIJ                   *a              = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE           *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct *matstructT     = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  const PetscScalar            *xarray;
  PetscScalar                  *yarray;
  PetscErrorCode               ierr;
  cusparseStatus_t             stat;

  PetscFunctionBegin;
  /* The copy below is necessary because operations such as axpy and scale may
     have modified the matrix on the CPU since the last upload. */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!matstructT) {
    /* build the explicit transpose on demand */
    ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
    matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  }
  ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr);

  if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
    CsrMatrix *mat = (CsrMatrix*)matstructT->mat;
    stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             mat->num_rows, mat->num_cols,
                             mat->num_entries, matstructT->alpha, matstructT->descr,
                             mat->values->data().get(), mat->row_offsets->data().get(),
                             mat->column_indices->data().get(), xarray, matstructT->beta,
                             yarray);CHKERRCUDA(stat);
  } else {
#if CUDA_VERSION>=4020
    cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat;
    stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                             matstructT->alpha, matstructT->descr, hybMat,
                             xarray, matstructT->beta,
                             yarray);CHKERRCUDA(stat);
#endif
  }
  ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
  ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr);
  if (!cusparsestruct->stream) {
    ierr = WaitForGPU();CHKERRCUDA(ierr);
  }
  ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}


#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE"
/* z = y + A*x. The SpMV writes into a compressed work vector; the result is
   then scattered into z with a += through a permutation iterator. */
static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_SeqAIJ                      *a              = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE              *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct    *matstruct      = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat;
  thrust::device_ptr<PetscScalar> zptr;
  const PetscScalar               *xarray;
  PetscScalar                     *zarray;
  PetscErrorCode                  ierr;
  cusparseStatus_t                stat;

  PetscFunctionBegin;
  /* The copy below is necessary because operations such as axpy and scale may
     have modified the matrix on the CPU since the last upload. */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  try {
    ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);
    ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUDAGetArrayReadWrite(zz,&zarray);CHKERRQ(ierr);
    zptr = thrust::device_pointer_cast(zarray);

    /* multiply add */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
      CsrMatrix *mat = (CsrMatrix*)matstruct->mat;
      /* here we need to be careful to set the number of rows in the multiply to the
         number of compressed rows in the matrix ... which is equivalent to the
         size of the workVector */
      stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                               mat->num_rows, mat->num_cols,
                               mat->num_entries, matstruct->alpha, matstruct->descr,
                               mat->values->data().get(), mat->row_offsets->data().get(),
                               mat->column_indices->data().get(), xarray, matstruct->beta,
                               cusparsestruct->workVector->data().get());CHKERRCUDA(stat);
    } else {
#if CUDA_VERSION>=4020
      cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat;
      if (cusparsestruct->workVector->size()) {
        stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 matstruct->alpha, matstruct->descr, hybMat,
                                 xarray, matstruct->beta,
                                 cusparsestruct->workVector->data().get());CHKERRCUDA(stat);
      }
#endif
    }

    /* scatter the data from the temporary into the full vector with a += operation */
    thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))),
                     thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(),
                     VecCUDAPlusEquals());
    ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUDARestoreArrayReadWrite(zz,&zarray);CHKERRQ(ierr);

  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE"
/* z = y + A^T*x, using the lazily-built explicit transpose and the same
   compressed-row work-vector scatter scheme as MatMultAdd. */
static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_SeqAIJ                      *a              = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJCUSPARSE              *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr;
  Mat_SeqAIJCUSPARSEMultStruct    *matstructT     = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  thrust::device_ptr<PetscScalar> zptr;
  const PetscScalar               *xarray;
  PetscScalar                     *zarray;
  PetscErrorCode                  ierr;
  cusparseStatus_t                stat;

  PetscFunctionBegin;
  /* The copy below is necessary because operations such as axpy and scale may
     have modified the matrix on the CPU since the last upload. */
  ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  if (!matstructT) {
    ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr);
    matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose;
  }

  try {
    ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);
    ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUDAGetArrayReadWrite(zz,&zarray);CHKERRQ(ierr);
    zptr = thrust::device_pointer_cast(zarray);

    /* multiply add with matrix transpose */
    if (cusparsestruct->format == MAT_CUSPARSE_CSR) {
      CsrMatrix *mat = (CsrMatrix*)matstructT->mat;
      /* here we need to be careful to set the number of rows in the multiply to the
         number of compressed rows in the matrix ... which is equivalent to the
         size of the workVector */
      stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                               mat->num_rows, mat->num_cols,
                               mat->num_entries, matstructT->alpha, matstructT->descr,
                               mat->values->data().get(), mat->row_offsets->data().get(),
                               mat->column_indices->data().get(), xarray, matstructT->beta,
                               cusparsestruct->workVector->data().get());CHKERRCUDA(stat);
    } else {
#if CUDA_VERSION>=4020
      cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat;
      if (cusparsestruct->workVector->size()) {
        stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                 matstructT->alpha, matstructT->descr, hybMat,
                                 xarray, matstructT->beta,
                                 cusparsestruct->workVector->data().get());CHKERRCUDA(stat);
      }
#endif
    }

    /* scatter the data from the temporary into the full vector with a += operation */
    thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))),
                     thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(),
                     VecCUDAPlusEquals());

    ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
    ierr = VecCUDARestoreArrayReadWrite(zz,&zarray);CHKERRQ(ierr);

  } catch(char *ex) {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex);
  }
  ierr = WaitForGPU();CHKERRCUDA(ierr);
  ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE"
/* Finish CPU assembly, push the data to the GPU (for non-factor matrices),
   and install the GPU mat-vec operations. */
static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
  if (A->factortype == MAT_FACTOR_NONE) {
    ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr);
  }
  /* on a flush assembly there is nothing more to do */
  if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);
  A->ops->mult             = MatMult_SeqAIJCUSPARSE;
  A->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
  A->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
  A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqAIJCUSPARSE"
/*@
   MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix
   assembly performance the user should preallocate the matrix storage by setting
   the parameter nz (or the array nnz).  By setting these parameters accurately,
   performance during matrix assembly can be increased by more than a factor of 50.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator, set to PETSC_COMM_SELF
.  m - number of rows
.  n - number of columns
.  nz - number of nonzeros per row (same for all rows)
-  nnz - array containing the number of nonzeros in the various rows
         (possibly different for each row) or NULL

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]

   Notes:
   If nnz is given then nz is ignored

   The AIJ format (also called the Yale sparse matrix format or
   compressed row storage), is fully compatible with standard Fortran 77
   storage.  That is, the stored row and column indices can begin at
   either one (as in Fortran) or zero.  See the users' manual for details.

   Specify the preallocated storage with either nz or nnz (not both).
   Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
   allocation.  For large problems you MUST preallocate memory or you
   will get TERRIBLE performance, see the users' manual chapter on matrices.

   By default, this format uses inodes (identical nodes) when possible, to
   improve numerical efficiency of matrix-vector products and solves. We
   search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Level: intermediate

.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE"
/* Free the GPU-side data (the mult structures for a regular matrix, or the
   triangular-factor structures for a factored matrix), then destroy the
   underlying SeqAIJ matrix. */
static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->factortype == MAT_FACTOR_NONE) {
    if (A->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) {
      ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr);
    }
  } else {
    ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr);
  }
  ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreate_SeqAIJCUSPARSE"
/* Type constructor: builds the SeqAIJ base, allocates the GPU data structure
   appropriate for A->factortype, and installs the CUSPARSE operations. */
PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle=0;

  PetscFunctionBegin;
  ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr);
  if (B->factortype == MAT_FACTOR_NONE) {
    /* you cannot check the inode.use flag here since the matrix was just created.
       now build a GPU matrix data structure */
    Mat_SeqAIJCUSPARSE *spptr = new Mat_SeqAIJCUSPARSE;
    spptr->mat          = 0;
    spptr->matTranspose = 0;
    spptr->workVector   = 0;
    spptr->format       = MAT_CUSPARSE_CSR;
    spptr->stream       = 0;
    spptr->handle       = 0;
    stat = cusparseCreate(&handle);CHKERRCUDA(stat);
    spptr->handle       = handle;
    spptr->stream       = 0;
    B->spptr = spptr;
  } else {
    /* NEXT, set the pointers to the triangular factors */
    Mat_SeqAIJCUSPARSETriFactors *spptr = new Mat_SeqAIJCUSPARSETriFactors;
    spptr->loTriFactorPtr          = 0;
    spptr->upTriFactorPtr          = 0;
    spptr->loTriFactorPtrTranspose = 0;
    spptr->upTriFactorPtrTranspose = 0;
    spptr->rpermIndices            = 0;
    spptr->cpermIndices            = 0;
    spptr->workVector              = 0;
    spptr->handle                  = 0;
    stat = cusparseCreate(&handle);CHKERRCUDA(stat);
    spptr->handle                  = handle;
    spptr->nnz                     = 0;
    B->spptr = spptr;
  }

  B->ops->assemblyend      = MatAssemblyEnd_SeqAIJCUSPARSE;
  B->ops->destroy          = MatDestroy_SeqAIJCUSPARSE;
  B->ops->getvecs          = MatCreateVecs_SeqAIJCUSPARSE;
  B->ops->setfromoptions   = MatSetFromOptions_SeqAIJCUSPARSE;
  B->ops->mult             = MatMult_SeqAIJCUSPARSE;
  B->ops->multadd          = MatMultAdd_SeqAIJCUSPARSE;
  B->ops->multtranspose    = MatMultTranspose_SeqAIJCUSPARSE;
  B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE;

  ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);

  B->valid_GPU_matrix = PETSC_CUDA_UNALLOCATED;

  ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*M
   MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices.

   A matrix type whose data resides on Nvidia GPUs. These matrices can be in either
   CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
   All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.

   Options Database Keys:
+  -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions()
.  -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
.  -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).

  Level: beginner

.seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/

PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*);


#undef __FUNCT__
#define __FUNCT__ "MatSolverPackageRegister_CUSPARSE"
/* Register the CUSPARSE factorization back-end for LU, Cholesky, ILU and ICC. */
PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}


#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy"
/* Free a Mat_SeqAIJCUSPARSE: the mult structures for A and A^T, the work
   vector, and the cuSPARSE handle; zero the caller's pointer.
   Fix over previous version: PetscErrorCode returns of the helper destroy
   routines are now checked (CHKERRQ), consistent with the rest of this file,
   and the handle test no longer hides an assignment inside the if condition. */
static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct)
{
  PetscErrorCode   ierr;
  cusparseStatus_t stat;
  cusparseHandle_t handle;

  PetscFunctionBegin;
  if (*cusparsestruct) {
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format);CHKERRQ(ierr);
    delete (*cusparsestruct)->workVector;
    handle = (*cusparsestruct)->handle;
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUDA(stat);
    }
    delete *cusparsestruct;
    *cusparsestruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "CsrMatrix_Destroy"
/* Free the three thrust device vectors of a CsrMatrix and the struct itself;
   zero the caller's pointer. Safe to call with *mat == NULL. */
static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat)
{
  PetscFunctionBegin;
  if (*mat) {
    delete (*mat)->values;
    delete (*mat)->column_indices;
    delete (*mat)->row_offsets;
    delete *mat;
    *mat = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy"
/* Free one triangular-factor structure: its cuSPARSE matrix descriptor,
   solve-analysis info, and CSR storage; zero the caller's pointer. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor)
{
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactor) {
    if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUDA(stat); }
    if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUDA(stat); }
    ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr);
    delete *trifactor;
    *trifactor = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy"
/* Free a mult structure, dispatching on the storage format (HYB object vs
   CSR matrix), plus its descriptor, compressed-row index vector, and the
   device-resident alpha/beta scalars; zero the caller's pointer.
   Fix over previous version: the CsrMatrix_Destroy() return code is now
   checked instead of silently discarded. */
static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format)
{
  CsrMatrix        *mat;
  cusparseStatus_t stat;
  cudaError_t      err;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*matstruct) {
    if ((*matstruct)->mat) {
      if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) {
        cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat;
        stat = cusparseDestroyHybMat(hybMat);CHKERRCUDA(stat);
      } else {
        mat  = (CsrMatrix*)(*matstruct)->mat;
        ierr = CsrMatrix_Destroy(&mat);CHKERRQ(ierr);
      }
    }
    if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUDA(stat); }
    delete (*matstruct)->cprowIndices;
    /* alpha and beta live in device memory (used by the SpMV calls) and must be cudaFree'd */
    if ((*matstruct)->alpha) { err = cudaFree((*matstruct)->alpha);CHKERRCUDA(err); }
    if ((*matstruct)->beta)  { err = cudaFree((*matstruct)->beta);CHKERRCUDA(err); }
    delete *matstruct;
    *matstruct = 0;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactors_Destroy"
/* Free all four triangular-factor structures, the permutation index vectors,
   the work vector, and the cuSPARSE handle; zero the caller's pointer.
   Fix over previous version: the four Mat_SeqAIJCUSPARSETriFactorStruct_Destroy()
   return codes are now checked, and the handle test no longer hides an
   assignment inside the if condition. */
static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors)
{
  cusparseHandle_t handle;
  cusparseStatus_t stat;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  if (*trifactors) {
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose);CHKERRQ(ierr);
    ierr = Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose);CHKERRQ(ierr);
    delete (*trifactors)->rpermIndices;
    delete (*trifactors)->cpermIndices;
    delete (*trifactors)->workVector;
    handle = (*trifactors)->handle;
    if (handle) {
      stat = cusparseDestroy(handle);CHKERRCUDA(stat);
    }
    delete *trifactors;
    *trifactors = 0;
  }
  PetscFunctionReturn(0);
}
