xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision 0700a8246d308f50502909ba325e6169d3ee27eb)
#define PETSCMAT_DLL

/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
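/*
  Usage sketch (illustrative only, not part of this file): these routines are
  normally reached through MatMatMult() on assembled MPIAIJ matrices with
  compatible parallel layouts; A and B below stand for the caller's matrices
  and 2.0 is an example fill estimate.

    Mat C;
    ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
    ... change the numerical values of A and/or B, keeping the nonzero pattern ...
    ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);
    ierr = MatDestroy(C);CHKERRQ(ierr);
*/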
#include "../src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/
#include "../src/mat/utils/freespace.h"
#include "../src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"
#include "../src/mat/impls/dense/mpi/mpidense.h"

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); /* numeric product is computed as well */
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult=(Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = PetscFree2(mult->startsj,mult->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
  if (mult->isrowa){ierr = ISDestroy(mult->isrowa);CHKERRQ(ierr);}
  if (mult->isrowb){ierr = ISDestroy(mult->isrowb);CHKERRQ(ierr);}
  if (mult->iscolb){ierr = ISDestroy(mult->iscolb);CHKERRQ(ierr);}
  if (mult->C_seq){ierr = MatDestroy(mult->C_seq);CHKERRQ(ierr);}
  if (mult->A_loc){ierr = MatDestroy(mult->A_loc);CHKERRQ(ierr);}
  if (mult->B_seq){ierr = MatDestroy(mult->B_seq);CHKERRQ(ierr);}
  if (mult->B_loc){ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);}
  if (mult->B_oth){ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);}
  ierr = PetscFree(mult->abi);CHKERRQ(ierr);
  ierr = PetscFree(mult->abj);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

EXTERN PetscErrorCode MatDestroy_AIJ(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode     ierr;
  PetscContainer     container;
  Mat_MatMatMultMPI  *mult=PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  A->ops->destroy = mult->MatDestroy;
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode     ierr;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  /* Note: the container is not duplicated, because doing so would require deep copies of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric().
     *M is unlikely to be used that way, so we create *M as a plain MPIAIJ matrix. */
  ierr = (*mult->MatDuplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->MatDestroy;   /* = MatDestroy_MPIAIJ; *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  PetscInt           start,end;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
    SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }
  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* create a seq matrix B_seq = submatrix of B consisting of the rows of B that correspond to the nonzero columns of A */
  ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A consisting of all local rows of A */
  start = A->rmap->rstart; end = A->rmap->rend;
  ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);CHKERRQ(ierr);
  ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating the C_seq matrices */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);

  /* attach the supporting struct to C so the symbolic C can be reused */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  mult->MatDestroy   = (*C)->ops->destroy;
  mult->MatDuplicate = (*C)->ops->duplicate;

  (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
  (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
  PetscFunctionReturn(0);
}
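
/*
  Sketch of what the symbolic routine above computes on each process, using the
  names stored in Mat_MatMatMultMPI:

     A_loc = all local rows of A, with columns condensed to those in isrowb
     B_seq = the rows of B corresponding to the nonzero columns of A
     C_seq = A_loc * B_seq              (sequential product)
     C     = MatMerge of the C_seq's    (row-wise concatenation across processes)
*/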

/* This routine is called ONLY when reusing a previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode     ierr;
  Mat                *seq;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;              /* sequential dense matrix holding the off-process rows of B needed locally */
  PetscScalar *rvalues,*svalues;  /* receive and send buffers for the row scatter */
  MPI_Request *rwaits,*swaits;    /* receive and send requests */
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  if (contents->workB) {ierr = MatDestroy(contents->workB);CHKERRQ(ierr);}
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz = aij->B->cmap->n;
  PetscContainer         cont;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               m=A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&cont);CHKERRQ(ierr);
  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(cont,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);

  /* Create work matrix used to store the off-process rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);

  /* Create the work arrays needed; each buffer holds one full row of B (B->cmap->N scalars)
     per row received (from->starts[from->n] rows in total) or sent (to->starts[to->n] rows in total) */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(cont);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
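/*
   Indexing sketch for the pack/unpack loops below, assuming the dense arrays of
   B and workB are stored column-major with leading dimensions nrowsB and nrows
   respectively:

      svalues[ncols*(sstarts[i]+j) + k] = B(sindices[sstarts[i]+j], k),  k = 0,...,ncols-1

   so the message for process sprocs[i] is one contiguous block of
   ncols*(sstarts[i+1]-sstarts[i]) scalars holding the full rows of B it needs.
*/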
#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         cont;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);CHKERRQ(ierr);
  ierr = PetscContainerGetPointer(cont,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices  = to->indices;
  sstarts   = to->starts;
  sprocs    = to->procs;
  swaits    = contents->swaits;
  svalues   = contents->svalues;

  rindices  = from->indices;
  rstarts   = from->starts;
  rprocs    = from->procs;
  rwaits    = contents->rwaits;
  rvalues   = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode       ierr;
  Mat_MPIAIJ           *aij = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense         *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense         *cdense = (Mat_MPIDense*)C->data;
  Mat                  workB;

  PetscFunctionBegin;

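  /* Sketch of the computation below: with A = [A_diag A_off] split into its
     diagonal and off-diagonal blocks, each process computes
        C_local = A_diag * B_local + A_off * workB
     where workB holds the off-process rows of B gathered by MatMPIDenseScatter() */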
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get the off-process parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times the nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}