xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision 7d0a6c19129e7069c8a40e210b34ed62989173db)
1 
2 /*
3   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4           C = A * B
5 */
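/*
   A rough usage sketch (assuming A and B are already created, assembled MPIAIJ matrices
   with compatible layouts); MatMatMult() dispatches to the routines in this file:

       Mat C;
       ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
       ... change numerical values of A and/or B, keeping their nonzero patterns ...
       ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
       ierr = MatDestroy(C);CHKERRQ(ierr);  /* also releases the cached product data */
*/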
6 #include "../src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/
7 #include "../src/mat/utils/freespace.h"
8 #include "../src/mat/impls/aij/mpi/mpiaij.h"
9 #include "petscbt.h"
10 #include "../src/mat/impls/dense/mpi/mpidense.h"
11 
12 #undef __FUNCT__
13 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
14 PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
15 {
16   PetscErrorCode ierr;
17 
18   PetscFunctionBegin;
19   if (scall == MAT_INITIAL_MATRIX){
20     ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);/* numeric product is computed as well */
21   } else if (scall == MAT_REUSE_MATRIX){
22     ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
23   } else {
24     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
25   }
26   PetscFunctionReturn(0);
27 }
28 
29 #undef __FUNCT__
30 #define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
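/* Called when the container attached to the product matrix C is destroyed; frees all of the
   data cached by MatMatMultSymbolic_MPIAIJ_MPIAIJ() for reuse in the numeric phase */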
31 PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
32 {
33   PetscErrorCode       ierr;
34   Mat_MatMatMultMPI    *mult=(Mat_MatMatMultMPI*)ptr;
35 
36   PetscFunctionBegin;
37   ierr = PetscFree2(mult->startsj,mult->startsj_r);CHKERRQ(ierr);
38   ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
39   if (mult->isrowa){ierr = ISDestroy(mult->isrowa);CHKERRQ(ierr);}
40   if (mult->isrowb){ierr = ISDestroy(mult->isrowb);CHKERRQ(ierr);}
41   if (mult->iscolb){ierr = ISDestroy(mult->iscolb);CHKERRQ(ierr);}
42   if (mult->C_seq){ierr = MatDestroy(mult->C_seq);CHKERRQ(ierr);}
43   if (mult->A_loc){ierr = MatDestroy(mult->A_loc);CHKERRQ(ierr); }
44   if (mult->B_seq){ierr = MatDestroy(mult->B_seq);CHKERRQ(ierr);}
45   if (mult->B_loc){ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);}
46   if (mult->B_oth){ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);}
47   ierr = PetscFree(mult->abi);CHKERRQ(ierr);
48   ierr = PetscFree(mult->abj);CHKERRQ(ierr);
49   ierr = PetscFree(mult);CHKERRQ(ierr);
50   PetscFunctionReturn(0);
51 }
52 
53 extern PetscErrorCode MatDestroy_AIJ(Mat);
54 #undef __FUNCT__
55 #define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
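/* Destroy routine installed on the product matrix C: restore the original destroy method,
   detach the "Mat_MatMatMultMPI" container, destroy the matrix with the original method, and
   finally destroy the container, which frees the cached data through
   PetscContainerDestroy_Mat_MatMatMultMPI() */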
56 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
57 {
58   PetscErrorCode     ierr;
59   PetscContainer     container;
60   Mat_MatMatMultMPI  *mult=PETSC_NULL;
61 
62   PetscFunctionBegin;
63   ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
64   if (container) {
65     ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
66   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
67   A->ops->destroy = mult->destroy;
68   ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
69   ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
70   ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
71   PetscFunctionReturn(0);
72 }
73 
74 #undef __FUNCT__
75 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
76 PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
77 {
78   PetscErrorCode     ierr;
79   Mat_MatMatMultMPI  *mult;
80   PetscContainer     container;
81 
82   PetscFunctionBegin;
83   ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
84   if (container) {
85     ierr  = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
86   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
87   /* Note: the container is not duplicated, because that would require deep copies of
88      several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
89      These data sets are only needed for repeated calls of MatMatMultNumeric(),
90      and *M is unlikely to be used that way, so *M is created as a plain MPIAIJ matrix */
91   ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
92   (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
93   (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
94   PetscFunctionReturn(0);
95 }
96 
97 #undef __FUNCT__
98 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
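/* Computes C = A*B for MPIAIJ A and B.  Roughly:
     1) gather the rows of B corresponding to the nonzero columns of A into a sequential
        matrix B_seq on each process (MatGetBrowsOfAcols()),
     2) extract all locally owned rows of A into a sequential matrix A_loc,
     3) form the local product C_seq = A_loc * B_seq with the SeqAIJ kernel,
     4) merge the per-process C_seq blocks into the parallel matrix C with MatMerge().
   The intermediate data is cached on C so MatMatMultNumeric_MPIAIJ_MPIAIJ() can reuse it. */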
99 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
100 {
101   PetscErrorCode     ierr;
102   PetscInt           start,end;
103   Mat_MatMatMultMPI  *mult;
104   PetscContainer     container;
105 
106   PetscFunctionBegin;
107   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
108     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D,%D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
109   }
110   ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);
111 
112   /* create a seq matrix B_seq = submatrix of B formed by the rows of B that correspond to the nonzero columns of A */
113   ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);CHKERRQ(ierr);
114 
115   /* create a seq matrix A_loc = submatrix of A formed by all local rows of A */
116   start = A->rmap->rstart; end = A->rmap->rend;
117   ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);CHKERRQ(ierr);
118   ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);CHKERRQ(ierr);
119 
120   /* compute C_seq = A_seq * B_seq */
121   ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);CHKERRQ(ierr);
122 
123   /* create the mpi matrix C by concatenating the local C_seq matrices */
124   ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
125   ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);
126 
127   /* attach the supporting struct to C for reuse of symbolic C */
128   ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
129   ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
130   ierr = PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
131   ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
132   mult->destroy   = (*C)->ops->destroy;
133   mult->duplicate = (*C)->ops->duplicate;
134 
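  /* override C's destroy/duplicate so that destroying C releases the cached data, while
     duplicating C does not attempt to copy it (see MatDestroy_MPIAIJ_MatMatMult() and
     MatDuplicate_MPIAIJ_MatMatMult() above) */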
135   (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
136   (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
137   PetscFunctionReturn(0);
138 }
139 
140 /* This routine is called ONLY when a previously computed symbolic C is being reused */
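/* The cached B_seq, A_loc and C_seq are refreshed in place (MAT_REUSE_MATRIX) with the
   current numerical values of A and B, and the result is merged back into the existing C;
   the communication pattern and nonzero structure from the symbolic phase are reused. */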
141 #undef __FUNCT__
142 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
143 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
144 {
145   PetscErrorCode     ierr;
146   Mat                *seq;
147   Mat_MatMatMultMPI  *mult;
148   PetscContainer     container;
149 
150   PetscFunctionBegin;
151   ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
152   if (container) {
153     ierr  = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
154   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
155 
156   seq = &mult->B_seq;
157   ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
158   mult->B_seq = *seq;
159 
160   seq = &mult->A_loc;
161   ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
162   mult->A_loc = *seq;
163 
164   ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);
165 
166   ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* as above, prevent C_seq from being destroyed by MatMerge() */
167   ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
168   PetscFunctionReturn(0);
169 }
170 
171 #undef __FUNCT__
172 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
173 PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
174 {
175   PetscErrorCode ierr;
176 
177   PetscFunctionBegin;
178   if (scall == MAT_INITIAL_MATRIX){
179     ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
180   }
181   ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
182   PetscFunctionReturn(0);
183 }
184 
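/* Scratch data kept between numeric products with a dense B: workB holds the needed
   off-process rows of B; rvalues/svalues and rwaits/swaits are the receive/send buffers
   and MPI requests used by MatMPIDenseScatter() */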
185 typedef struct {
186   Mat         workB;
187   PetscScalar *rvalues,*svalues;
188   MPI_Request *rwaits,*swaits;
189 } MPIAIJ_MPIDense;
190 
191 #undef __FUNCT__
192 #define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
193 PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
194 {
195   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
196   PetscErrorCode  ierr;
197 
198   PetscFunctionBegin;
199   if (contents->workB) {ierr = MatDestroy(contents->workB);CHKERRQ(ierr);}
200   ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
201   ierr = PetscFree(contents);CHKERRQ(ierr);
202   PetscFunctionReturn(0);
203 }
204 
205 #undef __FUNCT__
206 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
207 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
208 {
209   PetscErrorCode         ierr;
210   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
211   PetscInt               nz = aij->B->cmap->n;
212   PetscContainer         cont;
213   MPIAIJ_MPIDense        *contents;
214   VecScatter             ctx = aij->Mvctx;
215   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
216   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
217   PetscInt               m=A->rmap->n,n=B->cmap->n;
218 
219   PetscFunctionBegin;
220   ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
221   ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
222   ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
223   ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
224   ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
225 
226   ierr = PetscContainerCreate(((PetscObject)A)->comm,&cont);CHKERRQ(ierr);
227   ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
228   ierr = PetscContainerSetPointer(cont,contents);CHKERRQ(ierr);
229   ierr = PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
230 
231   /* Create the work matrix used to store the off-process rows of B needed for the local product */
232   ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
233 
234   /* Create the send/receive buffers and MPI request arrays used by MatMPIDenseScatter() */
235   ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
236                       B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
237                       from->n,MPI_Request,&contents->rwaits,
238                       to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);
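  /* each sent/received row carries ncols = B->cmap->N scalars; from->starts[from->n] and
     to->starts[to->n] are the total numbers of row indices received and sent by the scatter,
     matching the packing done in MatMPIDenseScatter() */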
239 
240   ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);CHKERRQ(ierr);
241   ierr = PetscContainerDestroy(cont);CHKERRQ(ierr);
242   PetscFunctionReturn(0);
243 }
244 
245 #undef __FUNCT__
246 #define __FUNCT__ "MatMPIDenseScatter"
247 /*
248     Performs an efficient scatter on the rows of B needed by this process; this is
249     a modification of the VecScatterBegin_() routines.
250 */
251 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
252 {
253   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
254   PetscErrorCode         ierr;
255   PetscScalar            *b,*w,*svalues,*rvalues;
256   VecScatter             ctx = aij->Mvctx;
257   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
258   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
259   PetscInt               i,j,k;
260   PetscInt               *sindices,*sstarts,*rindices,*rstarts;
261   PetscMPIInt            *sprocs,*rprocs,nrecvs;
262   MPI_Request            *swaits,*rwaits;
263   MPI_Comm               comm = ((PetscObject)A)->comm;
264   PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
265   MPI_Status             status;
266   MPIAIJ_MPIDense        *contents;
267   PetscContainer         cont;
268   Mat                    workB;
269 
270   PetscFunctionBegin;
271   ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);CHKERRQ(ierr);
272   ierr = PetscContainerGetPointer(cont,(void**)&contents);CHKERRQ(ierr);
273 
274   workB = *outworkB = contents->workB;
275   if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to number of columns of aij->B %D",workB->rmap->n,nrows);
276   sindices  = to->indices;
277   sstarts   = to->starts;
278   sprocs    = to->procs;
279   swaits    = contents->swaits;
280   svalues   = contents->svalues;
281 
282   rindices  = from->indices;
283   rstarts   = from->starts;
284   rprocs    = from->procs;
285   rwaits    = contents->rwaits;
286   rvalues   = contents->rvalues;
287 
288   ierr = MatGetArray(B,&b);CHKERRQ(ierr);
289   ierr = MatGetArray(workB,&w);CHKERRQ(ierr);
290 
291   for (i=0; i<from->n; i++) {
292     ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
293   }
294 
295   for (i=0; i<to->n; i++) {
296     /* pack a message at a time */
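    /* the local dense array b is column-major with leading dimension nrowsB, so row
       sindices[.] and column k live at b[sindices[.] + nrowsB*k]; each packed message is
       row-major: ncols consecutive values per row index */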
297     CHKMEMQ;
298     for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
299       for (k=0; k<ncols; k++) {
300         svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
301       }
302     }
303     CHKMEMQ;
304     ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
305   }
306 
307   nrecvs = from->n;
308   while (nrecvs) {
309     ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
310     nrecvs--;
311     /* unpack a message at a time */
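    /* reverse of the packing above: scatter the ncols values of each received row into the
       column-major workB array, which has nrows = aij->B->cmap->n rows */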
312     CHKMEMQ;
313     for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
314       for (k=0; k<ncols; k++) {
315         w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
316       }
317     }
318     CHKMEMQ;
319   }
320   if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}
321 
322   ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
323   ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
324   ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
325   ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
326   PetscFunctionReturn(0);
327 }
328 extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);
329 
330 #undef __FUNCT__
331 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
332 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
333 {
334   PetscErrorCode       ierr;
335   Mat_MPIAIJ           *aij = (Mat_MPIAIJ*)A->data;
336   Mat_MPIDense         *bdense = (Mat_MPIDense*)B->data;
337   Mat_MPIDense         *cdense = (Mat_MPIDense*)C->data;
338   Mat                  workB;
339 
340   PetscFunctionBegin;
341 
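  /* the local part of C is assembled as  C_loc = A_d * B_loc + A_o * workB,  where A_d and
     A_o are the diagonal and off-diagonal blocks of A, B_loc holds the locally owned rows of
     B, and workB holds the off-process rows of B gathered by MatMPIDenseScatter() */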
342   /* diagonal block of A times all local rows of B */
343   ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);
344 
345   /* get off processor parts of B needed to complete the product */
346   ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);
347 
348   /* off-diagonal block of A times nonlocal rows of B */
349   ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
350   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
351   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
352   PetscFunctionReturn(0);
353 }
354 
355