#if !defined(__MPIAIJ_H)
#define __MPIAIJ_H

#include <../src/mat/impls/aij/seq/aij.h>

typedef struct { /* used by MatCreateMPIAIJSumSeqAIJ for reusing the merged matrix */
  PetscLayout rowmap;
  PetscInt    **buf_ri,**buf_rj;
  PetscMPIInt *len_s,*len_r,*id_r;    /* arrays of length comm->size; lengths of sent/received matrix values and ranks of the senders */
  PetscMPIInt nsend,nrecv;
  PetscInt    *bi,*bj;                /* i and j arrays of the local portion of mpi C (matrix product) - rename to ci, cj! */
  PetscInt    *owners_co,*coi,*coj;   /* i and j arrays of (p->B)^T*A*P - used in the communication */
} Mat_Merge_SeqsToMPI;

typedef struct { /* used by MatPtAPXXX_MPIAIJ_MPIAIJ() and MatMatMultXXX_MPIAIJ_MPIAIJ() */
  PetscInt               *startsj_s,*startsj_r;  /* used by MatGetBrowsOfAoCols_MPIAIJ */
  PetscScalar            *bufa;                  /* used by MatGetBrowsOfAoCols_MPIAIJ */
  Mat                    P_loc,P_oth;            /* partial B_seq -- intended to replace B_seq */
  PetscInt               *api,*apj;              /* symbolic i and j arrays of the local product A_loc*B_seq */
  PetscScalar            *apv;
  MatReuse               reuse;                  /* flag to skip MatGetBrowsOfAoCols_MPIAIJ() and MatMPIAIJGetLocalMat() in the first call of MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  PetscScalar            *apa;                   /* temporary array storing one row of A*P, used in MatMatMult() */
  Mat                    A_loc;                  /* used by MatTransposeMatMult(), contains api and apj */
  ISLocalToGlobalMapping ltog;                   /* mapping from local column indices to global column indices for A_loc */
  Mat                    Pt;                     /* used by MatTransposeMatMult(), Pt = P^T */
  Mat                    Rd,Ro,AP_loc,C_loc,C_oth;
  PetscInt               algType;                /* implementation algorithm */
  PetscSF                sf;                     /* used to communicate the remote part of C */
  PetscInt               *c_othi,*c_rmti;

  Mat_Merge_SeqsToMPI *merge;
} Mat_APMPI;

typedef struct {
  Mat         A,B;                   /* local submatrices: A (diag part), B (off-diag part) */
  PetscMPIInt size;                  /* size of communicator */
  PetscMPIInt rank;                  /* rank of proc in communicator */

  /* The following variables are used for matrix assembly */
  PetscBool   donotstash;            /* PETSC_TRUE if off-processor entries are dropped */
  MPI_Request *send_waits;           /* array of send requests */
  MPI_Request *recv_waits;           /* array of receive requests */
  PetscInt    nsends,nrecvs;         /* numbers of sends and receives */
  PetscScalar *svalues,*rvalues;     /* send and receive buffers */
  PetscInt    rmax;                  /* maximum message length */
#if defined(PETSC_USE_CTABLE)
  PetscTable  colmap;
#else
  PetscInt    *colmap;               /* local column numbers of off-diagonal columns */
#endif
  PetscInt    *garray;               /* global indices of all off-processor columns */

  /* The following variables are used for matrix-vector products */
  Vec        lvec;                   /* local vector */
  Vec        diag;
  VecScatter Mvctx;                  /* scatter context for vector */
  PetscBool  roworiented;            /* if true, row-oriented input; default true */

  /* The following variables are for MatGetRow() */
  PetscInt    *rowindices;           /* column indices for row */
  PetscScalar *rowvalues;            /* nonzero values in row */
  PetscBool   getrowactive;          /* indicates a MatGetRow() that has not yet been restored */

  PetscInt *ld;                      /* number of entries per row left of diagonal block */

  /* Used by device classes */
  void *spptr;

} Mat_MPIAIJ;
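
/*
   Usage sketch (illustrative, not part of the implementation): the local rows of an MPIAIJ
   matrix are split into the SeqAIJ matrices A (diagonal block) and B (off-diagonal block)
   above, with garray giving the global column index of each compressed column of B. User
   code can reach these pieces through the public routine MatMPIAIJGetSeqAIJ(); the names
   X, Xd, Xo and gcols below are placeholders chosen for this example.

     Mat            Xd,Xo;
     const PetscInt *gcols;
     PetscInt       no,j;
     PetscErrorCode ierr;

     ierr = MatMPIAIJGetSeqAIJ(X,&Xd,&Xo,&gcols);CHKERRQ(ierr); // Xd = diag block, Xo = off-diag block, gcols = garray
     ierr = MatGetSize(Xo,NULL,&no);CHKERRQ(ierr);              // Xo has one compressed column per entry of gcols
     for (j=0; j<no; j++) {
       // gcols[j] is the global index of local column j of Xo
     }
*/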

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);

PETSC_INTERN PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDisAssemble_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDuplicate_MPIAIJ(Mat,MatDuplicateOption,Mat*);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatFDColoringCreate_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatFDColoringSetUp_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatCreateSubMatrices_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatricesMPI_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_All(Mat,MatCreateSubMatrixOption,MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatView_MPIAIJ(Mat,PetscViewer);

PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat,IS,IS,PetscInt,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat,IS,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);

PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ_Binary(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat);

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_RARt_MPIAIJ_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductNumeric_RARt_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,Mat);

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
#endif
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIDense(Mat,MatType,MatReuse,Mat*);
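
/*
   Usage sketch (illustrative): the Symbolic/Numeric pairs above implement the parallel
   kernels behind public interfaces such as MatPtAP() and MatMatMult(). A typical caller
   forms C = P^T*A*P once and then reuses its nonzero structure when only the values of A
   change; everything below other than the PETSc calls themselves is a placeholder.

     Mat            C;
     PetscErrorCode ierr;

     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr); // symbolic + numeric phases, fill estimate 2.0
     // ... update the numerical values of A, keeping its nonzero pattern ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);   // numeric phase only, structure of C reused
*/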

#if defined(PETSC_HAVE_SCALAPACK)
PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat,MatType,MatReuse,Mat*);
#endif

PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_PtAP(void*);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void*);

PETSC_INTERN PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat,Mat,MatReuse,PetscInt**,PetscInt**,MatScalar**,Mat*);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetOption_MPIAIJ(Mat,MatOption,PetscBool);

PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat,Mat*);

PETSC_INTERN PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems*,Mat);
PETSC_INTERN PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

#if !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_REAL_SINGLE) && !defined(PETSC_USE_REAL___FLOAT128) && !defined(PETSC_USE_REAL___FP16)
PETSC_INTERN PetscErrorCode MatLUFactorSymbolic_MPIAIJ_TFS(Mat,IS,IS,const MatFactorInfo*,Mat*);
#endif
PETSC_INTERN PetscErrorCode MatSolve_MPIAIJ(Mat,Vec,Vec);
PETSC_INTERN PetscErrorCode MatILUFactor_MPIAIJ(Mat,IS,IS,const MatFactorInfo*);

PETSC_INTERN PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,PetscInt*);

extern PetscErrorCode MatGetDiagonalBlock_MPIAIJ(Mat,Mat*);
extern PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat,Vec);

PETSC_INTERN PetscErrorCode MatGetSeqMats_MPIAIJ(Mat,Mat*,Mat*);
PETSC_INTERN PetscErrorCode MatSetSeqMats_MPIAIJ(Mat,IS,IS,IS,MatStructure,Mat,Mat);
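
/*
   Usage sketch for the AProw_* macros defined below (illustrative): inside a numeric
   MatMatMult/MatPtAP kernel, each local row i of A is accumulated into a workspace row of
   A*P. The identifiers am, a_d, a_o, p_loc, p_oth and apa are placeholders; in the actual
   kernels they are Mat_SeqAIJ pointers and arrays taken from the Mat_MPIAIJ and Mat_APMPI
   structures above.

     PetscInt i;
     for (i=0; i<am; i++) {                            // am = number of local rows of A
       AProw_nonscalable(i,a_d,a_o,p_loc,p_oth,apa);   // apa[] now holds dense row i of A*P
       // gather the nonzeros of apa[] into the product matrix, then reset them to zero
     }
*/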

/* compute apa = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth using sparse axpy */
#define AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj,_nextp,*_apJ;\
  PetscScalar *_aa,_valtmp,*_pa;\
  _apJ = apj + api[i];\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform sparse axpy */\
    _valtmp = _aa[_j];\
    _nextp  = 0;\
    for (_k=0; _nextp<_pnz; _k++) {\
      if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
        apa[_k] += _valtmp*_pa[_nextp++];\
      }\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth) {\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform sparse axpy */\
      _valtmp = _aa[_j];\
      _nextp  = 0;\
      for (_k=0; _nextp<_pnz; _k++) {\
        if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
          apa[_k] += _valtmp*_pa[_nextp++];\
        }\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}

#define AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj;\
  PetscScalar *_aa,_valtmp,*_pa;\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform dense axpy */\
    _valtmp = _aa[_j];\
    for (_k=0; _k<_pnz; _k++) {\
      apa[_pj[_k]] += _valtmp*_pa[_k];\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth) {\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform dense axpy */\
      _valtmp = _aa[_j];\
      for (_k=0; _k<_pnz; _k++) {\
        apa[_pj[_k]] += _valtmp*_pa[_k];\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}

#endif