#if !defined(__MPIAIJ_H)
#define __MPIAIJ_H

#include <../src/mat/impls/aij/seq/aij.h>

typedef struct { /* used by MatCreateMPIAIJSumSeqAIJ for reusing the merged matrix */
  PetscLayout rowmap;
  PetscInt    **buf_ri,**buf_rj;
  PetscMPIInt *len_s,*len_r,*id_r;    /* arrays of length comm->size; lengths and source ranks for the send/recv of matrix values */
  PetscMPIInt nsend,nrecv;
  PetscInt    *bi,*bj;                /* i and j arrays of the local portion of the MPI matrix product C - rename to ci, cj! */
  PetscInt    *owners_co,*coi,*coj;   /* i and j arrays of (p->B)^T*A*P, used in the communication */
} Mat_Merge_SeqsToMPI;
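/* A minimal sketch (illustrative only, not part of the PETSc API) of the CSR
   convention behind the i/j pairs above (bi/bj, coi/coj): bi[r]..bi[r+1]
   delimits the stored entries of row r, and bj[] holds their column indices. */
#if 0
static void TraverseRow_Sketch(const PetscInt *bi,const PetscInt *bj,PetscInt r)
{
  PetscInt k;
  for (k = bi[r]; k < bi[r+1]; k++) {
    /* bj[k] is the column index of the k-th stored nonzero of row r */
  }
}
#endif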

typedef struct { /* used by MatPtAPXXX_MPIAIJ_MPIAIJ() and MatMatMultXXX_MPIAIJ_MPIAIJ() */
  PetscInt               *startsj_s,*startsj_r;    /* used by MatGetBrowsOfAoCols_MPIAIJ */
  PetscScalar            *bufa;                    /* used by MatGetBrowsOfAoCols_MPIAIJ */
  Mat                     P_loc,P_oth;             /* partial B_seq -- intended to replace B_seq */
  PetscInt               *api,*apj;                /* symbolic i and j arrays of the local product A_loc*B_seq */
  PetscScalar            *apv;
  MatReuse                reuse;                   /* flag to skip MatGetBrowsOfAoCols_MPIAIJ() and MatMPIAIJGetLocalMat() in the first call of MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  PetscScalar            *apa;                     /* temporary array for storing a row of A*P, used in MatMatMult() */
  Mat                     A_loc;                   /* used by MatTransposeMatMult(), contains api and apj */
  ISLocalToGlobalMapping  ltog;                    /* mapping from local column indices to global column indices for A_loc */
  Mat                     Pt;                      /* used by MatTransposeMatMult(), Pt = P^T */
  Mat                     Rd,Ro,AP_loc,C_loc,C_oth;
  PetscInt                algType;                 /* implementation algorithm */
  PetscSF                 sf;                      /* used to communicate the remote part of C */
  PetscInt               *c_othi,*c_rmti;

  Mat_Merge_SeqsToMPI    *merge;
} Mat_APMPI;
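/* A minimal sketch, under assumptions, of the symbolic/numeric split that the
   'reuse' flag above records: the symbolic phase builds the pattern api/apj
   once, and later numeric calls refill only the values.  The routine name and
   the access path C->product->data are illustrative, not the actual code. */
#if 0
static PetscErrorCode ProductNumeric_Sketch(Mat C)
{
  Mat_APMPI *ptap = (Mat_APMPI*)C->product->data; /* hypothetical access path */

  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* first numeric call: gather off-process rows of P, build P_oth, etc. */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else {
    /* subsequent calls: api/apj are reused; only the values apv change */
  }
  return 0;
}
#endif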

typedef struct {
  Mat A,B;                              /* local submatrices: A (diagonal part),
                                           B (off-diagonal part) */
  PetscMPIInt size;                     /* size of communicator */
  PetscMPIInt rank;                     /* rank of proc in communicator */

  /* The following variables are used for matrix assembly */
  PetscBool   donotstash;               /* PETSC_TRUE if off-processor entries are dropped */
  MPI_Request *send_waits;              /* array of send requests */
  MPI_Request *recv_waits;              /* array of receive requests */
  PetscInt    nsends,nrecvs;            /* numbers of sends and receives */
  PetscScalar *svalues,*rvalues;        /* send and receive data buffers */
  PetscInt    rmax;                     /* maximum message length */
#if defined(PETSC_USE_CTABLE)
  PetscTable colmap;
#else
  PetscInt *colmap;                     /* local column numbers of off-diagonal columns */
#endif
  PetscInt *garray;                     /* global indices of all off-processor columns */

  /* The following variables are used for matrix-vector products */
  Vec        lvec;                      /* local vector */
  Vec        diag;
  VecScatter Mvctx,Mvctx_mpi1;          /* scatter contexts for vectors */
  PetscBool  Mvctx_mpi1_flg;            /* if true, the additional Mvctx_mpi1 is requested for mat-mat ops, default false */
  PetscBool  roworiented;               /* if true, row-oriented input, default true */

  /* The following variables are for MatGetRow() */
  PetscInt    *rowindices;              /* column indices for the row */
  PetscScalar *rowvalues;               /* nonzero values in the row */
  PetscBool   getrowactive;             /* indicates a MatGetRow() is active (not yet restored) */

  /* Used by MatDistribute_MPIAIJ() to allow reuse of previous matrix allocation and nonzero pattern */
  PetscInt *ld;                         /* number of entries per row left of the diagonal block */

  /* Used by MPICUSPARSE classes */
  void *spptr;

} Mat_MPIAIJ;
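/* A minimal sketch (error checking omitted) of how the A/B split and lvec/Mvctx
   above combine in the standard MPIAIJ matrix-vector product: y = A*x_local +
   B*x_offproc, where the off-process entries of x are gathered into lvec
   through the Mvctx scatter while the local multiply proceeds. */
#if 0
static PetscErrorCode MatMult_MPIAIJ_Sketch(Mat mat,Vec xx,Vec yy)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

  VecScatterBegin(aij->Mvctx,xx,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
  MatMult(aij->A,xx,yy);                /* diagonal block, fully local */
  VecScatterEnd(aij->Mvctx,xx,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);
  MatMultAdd(aij->B,aij->lvec,yy,yy);   /* off-diagonal block: yy += B*lvec */
  return 0;
}
#endif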

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);

PETSC_INTERN PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDisAssemble_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDuplicate_MPIAIJ(Mat,MatDuplicateOption,Mat*);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat,PetscInt,IS[],PetscInt);
PETSC_INTERN PetscErrorCode MatFDColoringCreate_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatFDColoringSetUp_MPIXAIJ(Mat,ISColoring,MatFDColoring);
PETSC_INTERN PetscErrorCode MatCreateSubMatrices_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatricesMPI_MPIAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_All(Mat,MatCreateSubMatrixOption,MatReuse,Mat*[]);
PETSC_INTERN PetscErrorCode MatView_MPIAIJ(Mat,PetscViewer);

PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat,IS,IS,PetscInt,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat,IS,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat,IS,IS,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat,MPI_Comm,MatReuse,Mat*);

PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatLoad_MPIAIJ_Binary(Mat,PetscViewer);
PETSC_INTERN PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat);

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatProductSymbolic_RARt_MPIAIJ_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatProductNumeric_RARt_MPIAIJ_MPIAIJ(Mat);

PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ(Mat,Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);

PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat,Mat,Mat);

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
#endif

PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_PtAP(void*);
PETSC_INTERN PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void*);

PETSC_INTERN PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat,Mat,MatReuse,PetscInt**,PetscInt**,MatScalar**,Mat*);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
PETSC_INTERN PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat,const PetscInt[],const PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetOption_MPIAIJ(Mat,MatOption,PetscBool);

PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat,Mat,Mat);
PETSC_INTERN PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
PETSC_INTERN PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat,Mat*);

PETSC_INTERN PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems*,Mat);
PETSC_INTERN PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

#if !defined(PETSC_USE_COMPLEX) && !defined(PETSC_USE_REAL_SINGLE) && !defined(PETSC_USE_REAL___FLOAT128) && !defined(PETSC_USE_REAL___FP16)
PETSC_INTERN PetscErrorCode MatLUFactorSymbolic_MPIAIJ_TFS(Mat,IS,IS,const MatFactorInfo*,Mat*);
#endif
PETSC_INTERN PetscErrorCode MatSolve_MPIAIJ(Mat,Vec,Vec);
PETSC_INTERN PetscErrorCode MatILUFactor_MPIAIJ(Mat,IS,IS,const MatFactorInfo*);

PETSC_INTERN PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,const PetscInt*,PetscInt*);

extern PetscErrorCode MatGetDiagonalBlock_MPIAIJ(Mat,Mat*);
extern PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat,Vec);

PETSC_INTERN PetscErrorCode MatGetSeqMats_MPIAIJ(Mat,Mat*,Mat*);
PETSC_INTERN PetscErrorCode MatSetSeqMats_MPIAIJ(Mat,IS,IS,IS,MatStructure,Mat,Mat);

/* compute apa = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth using sparse axpy */
#define AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj,_nextp,*_apJ;\
  PetscScalar *_aa,_valtmp,*_pa;\
  _apJ = apj + api[i];\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform sparse axpy */\
    _valtmp = _aa[_j];\
    _nextp  = 0;\
    for (_k=0; _nextp<_pnz; _k++) {\
      if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
        apa[_k] += _valtmp*_pa[_nextp++];\
      }\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth) {\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform sparse axpy */\
      _valtmp = _aa[_j];\
      _nextp  = 0;\
      for (_k=0; _nextp<_pnz; _k++) {\
        if (_apJ[_k] == _pj[_nextp]) { /* column of AP == column of P */\
          apa[_k] += _valtmp*_pa[_nextp++];\
        }\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}
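/* A minimal sketch, under assumptions, of how AProw_scalable() is driven from a
   numeric kernel: apa is a workspace holding one row of AP, compressed to the
   pattern api/apj produced by the symbolic phase; ad/ao and p_loc/p_oth are
   the usual Mat_SeqAIJ pieces.  Setup and the scatter of apa into C are
   elided, and the surrounding names are illustrative. */
#if 0
{
  Mat_SeqAIJ  *ad,*ao,*p_loc,*p_oth;  /* diag/off-diag parts of A; local/off-process rows of P */
  PetscInt    *api,*apj,i,am;         /* symbolic CSR pattern of AP; am = number of local rows */
  PetscScalar *apa;                   /* workspace: one compressed row of AP, zeroed between rows */

  for (i = 0; i < am; i++) {
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    /* ... add apa into row i of C, then reset apa to zero ... */
  }
}
#endif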

#define AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa) \
{\
  PetscInt    _anz,_pnz,_j,_k,*_ai,*_aj,_row,*_pi,*_pj;\
  PetscScalar *_aa,_valtmp,*_pa;\
  /* diagonal portion of A */\
  _ai  = ad->i;\
  _anz = _ai[i+1] - _ai[i];\
  _aj  = ad->j + _ai[i];\
  _aa  = ad->a + _ai[i];\
  for (_j=0; _j<_anz; _j++) {\
    _row = _aj[_j];\
    _pi  = p_loc->i;\
    _pnz = _pi[_row+1] - _pi[_row];\
    _pj  = p_loc->j + _pi[_row];\
    _pa  = p_loc->a + _pi[_row];\
    /* perform dense axpy */\
    _valtmp = _aa[_j];\
    for (_k=0; _k<_pnz; _k++) {\
      apa[_pj[_k]] += _valtmp*_pa[_k];\
    }\
    (void)PetscLogFlops(2.0*_pnz);\
  }\
  /* off-diagonal portion of A */\
  if (p_oth) {\
    _ai  = ao->i;\
    _anz = _ai[i+1] - _ai[i];\
    _aj  = ao->j + _ai[i];\
    _aa  = ao->a + _ai[i];\
    for (_j=0; _j<_anz; _j++) {\
      _row = _aj[_j];\
      _pi  = p_oth->i;\
      _pnz = _pi[_row+1] - _pi[_row];\
      _pj  = p_oth->j + _pi[_row];\
      _pa  = p_oth->a + _pi[_row];\
      /* perform dense axpy */\
      _valtmp = _aa[_j];\
      for (_k=0; _k<_pnz; _k++) {\
        apa[_pj[_k]] += _valtmp*_pa[_k];\
      }\
      (void)PetscLogFlops(2.0*_pnz);\
    }\
  }\
}
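/* The two macros differ only in the accumulator: AProw_nonscalable writes into
   a dense array indexed by column number, which must later be harvested back
   into the CSR row of C.  A sketch of that harvest step, with illustrative
   names (ci/cjj/caa are assumed CSR arrays of C): */
#if 0
{
  PetscInt    k,cnz = ci[i+1] - ci[i];
  PetscInt    *cj  = cjj + ci[i];     /* column pattern of row i of C */
  PetscScalar *ca  = caa + ci[i];
  for (k = 0; k < cnz; k++) {
    ca[k]      = apa[cj[k]];          /* collect the computed value */
    apa[cj[k]] = 0.0;                 /* reset the dense accumulator for the next row */
  }
}
#endif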

#endif