xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision dd85299cac017dea8f8657dcbe071ecc007ecc52)

/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
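/*
   Minimal usage sketch (a reading aid, not part of this file): assuming A and B are
   assembled MPIAIJ matrices with compatible layouts, the routines below are reached
   through the generic MatMatMult() interface:

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);   symbolic + numeric product
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);     reuse symbolic data, recompute values
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/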
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);/* numeric product is computed as well */
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult=(Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = PetscFree2(mult->startsj,mult->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowb);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->iscolb);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->C_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->A_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_oth);CHKERRQ(ierr);
  ierr = PetscFree(mult->abi);CHKERRQ(ierr);
  ierr = PetscFree(mult->abj);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_AIJ(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode     ierr;
  PetscContainer     container;
  Mat_MatMatMultMPI  *mult=PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  A->ops->destroy   = mult->destroy;
  A->ops->duplicate = mult->duplicate;
  if (A->ops->destroy) {
    ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  }
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode     ierr;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr  = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  /* Note: the container is not duplicated, because doing so would require deep copies of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only needed for repeated calls of MatMatMultNumeric();
     *M is unlikely to be used that way, so we create *M in plain MPIAIJ format. */
  ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
  (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

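/*
   Overview of the algorithm below (a reading aid, not authoritative documentation):
   each process gathers the rows of B that correspond to the nonzero columns of its
   local rows of A (B_seq) together with its own rows of A (A_loc), forms the
   sequential product C_seq = A_loc * B_seq, and the per-process C_seq pieces are
   then merged into the parallel matrix C.  The supporting data are kept in a
   Mat_MatMatMultMPI container attached to C so that MatMatMultNumeric() can reuse them.
*/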
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;
  Mat                AB,*seq;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data;
  PetscInt           *idx,i,start,ncols,nzA,nzB,*cmap,imark;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }
  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* get isrowb: nonzero col of A */
  start = A->cmap->rstart;
  cmap  = a->garray;
  nzA   = a->A->cmap->n;
  nzB   = a->B->cmap->n;
  ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);CHKERRQ(ierr);
  ncols = 0;
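  /* a->garray lists the global indices of the off-diagonal columns in increasing
     order; merge them with the columns of the diagonal block (start..start+nzA-1)
     so that idx[] holds all nonzero columns of A in ascending global order */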
  for (i=0; i<nzB; i++) {  /* row < local row index */
    if (cmap[i] < start) idx[ncols++] = cmap[i];
    else break;
  }
  imark = i;
  for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
  for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&mult->isrowb);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&mult->iscolb);CHKERRQ(ierr);

  /* get isrowa: all local rows of A */
  ierr = ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,&mult->isrowa);CHKERRQ(ierr);

  /* The steps below should eventually move to MatMatMultNumeric_MPIAIJ_MPIAIJ() - but how would C be generated there? */
  /* create a seq matrix B_seq = submatrix of B containing the rows of B that correspond to the nonzero columns of A */
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A containing all local rows of A */
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,fill,&mult->C_seq);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  /* create the parallel matrix C by concatenating the local C_seq matrices */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMergeSymbolic(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,&AB);CHKERRQ(ierr);
  ierr = MatMergeNumeric(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,AB);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)AB,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  mult->destroy      = AB->ops->destroy;
  mult->duplicate    = AB->ops->duplicate;
  AB->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
  AB->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
  AB->ops->matmult   = MatMatMult_MPIAIJ_MPIAIJ;
  *C                 = AB;
  PetscFunctionReturn(0);
}

/* This routine is called ONLY when reusing a previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode     ierr;
  Mat                *seq;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr  = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

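/* Scratch data attached to the product C as the "workB" container (a reading aid):
   workB            - sequential dense matrix holding the off-process rows of B needed locally
   rvalues, svalues - receive/send buffers for those rows
   rwaits, swaits   - MPI request arrays, reused on every numeric product */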
typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               m=A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  (*C)->ops->matmult = MatMatMult_MPIAIJ_MPIDense;

  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-process rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
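  /* Sizing (a reading aid): from->starts[from->n] and to->starts[to->n] are the total
     numbers of rows of B to be received and sent through aij->Mvctx, and every such row
     carries all B->cmap->N columns; from->n and to->n are the numbers of neighbor
     processes, one MPI request each */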
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices  = to->indices;
  sstarts   = to->starts;
  sprocs    = to->procs;
  swaits    = contents->swaits;
  svalues   = contents->svalues;

  rindices  = from->indices;
  rstarts   = from->starts;
  rprocs    = from->procs;
  rwaits    = contents->rwaits;
  rvalues   = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

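  /* Packing layout (a reading aid): the dense local array b is stored column-major
     with nrowsB local rows, so entry (row sindices[sstarts[i]+j], column k) lives at
     b[sindices[sstarts[i]+j] + nrowsB*k]; each message carries all ncols columns of
     every requested row */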
  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

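/* The local part of the product below is assembled in two pieces (a reading aid):
   C_local = A_d * B_local + A_o * workB, where A_d and A_o are the diagonal and
   off-diagonal blocks of A and workB holds the off-process rows of B gathered by
   MatMPIDenseScatter() above */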
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode       ierr;
  Mat_MPIAIJ           *aij = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense         *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense         *cdense = (Mat_MPIDense*)C->data;
  Mat                  workB;

  PetscFunctionBegin;

  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-process parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}