xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision d44834fb5aa6ab55eaa2e18f09dfd00e40aa4639)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
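/*
  Minimal usage sketch (illustrative only; the fill value 2.0 is an arbitrary example):
  callers go through the public MatPtAP() interface, which dispatches to
  MatPtAP_MPIAIJ_MPIAIJ() below when A and P are both of type MPIAIJ.

     Mat A,P,C;
     ...  assemble A and P ...
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  symbolic + numeric product
     ...  change the numerical values of A, keeping its nonzero pattern ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    reuses the data attached to P

  The MAT_REUSE_MATRIX path relies on the 'Mat_MatMatMultMPI' container that the
  initial call attaches to P.
*/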
6 
7 #include "src/mat/impls/aij/seq/aij.h"   /*I "petscmat.h" I*/
8 #include "src/mat/utils/freespace.h"
9 #include "src/mat/impls/aij/mpi/mpiaij.h"
10 #include "petscbt.h"
11 
12 #undef __FUNCT__
13 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
14 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
15 {
16   PetscErrorCode       ierr;
17   Mat_MatMatMultMPI    *mult;
18   PetscObjectContainer container;
19 
20   PetscFunctionBegin;
21   if (scall == MAT_INITIAL_MATRIX){
22     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
23     /* destroy any existing container 'Mat_MatMatMultMPI' in case one is already attached to P */
24     ierr = PetscObjectContainerDestroy_Mat_MatMatMultMPI((PetscObject)P);CHKERRQ(ierr);
25     /* create the container 'Mat_MatMatMultMPI' and attach it to P */
26     ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);
27     ierr = PetscMemzero(mult,sizeof(Mat_MatMatMultMPI));CHKERRQ(ierr);
28     mult->B_loc=PETSC_NULL; mult->B_oth=PETSC_NULL;
29     mult->abi=PETSC_NULL; mult->abj=PETSC_NULL;
30     mult->abnz_max = 0; /* symbolic A*P is not done yet */
31 
32     /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
33     ierr = MatGetBrowsOfAoCols(A,P,scall,&mult->startsj,&mult->bufa,&mult->B_oth);CHKERRQ(ierr);
34 
35     /* get P_loc by taking all local rows of P */
36     ierr = MatGetLocalMat(P,scall,&mult->B_loc);CHKERRQ(ierr);
37 
38     /* attach the container 'Mat_MatMatMultMPI' to P */
39     P->ops->destroy  = MatDestroy_MPIAIJ_MatMatMult;
40     ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
41     ierr = PetscObjectContainerSetPointer(container,mult);CHKERRQ(ierr);
42     ierr = PetscObjectCompose((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
43 
44     /* now, compute symbolic local P^T*A*P */
45     ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);/* numeric product is computed as well */
46     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
47   } else if (scall == MAT_REUSE_MATRIX){
48     ierr = PetscObjectQuery((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
49     if (container) {
50       ierr  = PetscObjectContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
51     } else {
52       SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
53     }
54     /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
55     ierr = MatGetBrowsOfAoCols(A,P,scall,&mult->startsj,&mult->bufa,&mult->B_oth);CHKERRQ(ierr);
56     /* get P_loc by taking all local rows of P */
57     ierr = MatGetLocalMat(P,scall,&mult->B_loc);CHKERRQ(ierr);
58   } else {
59     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",scall);
60   }
61   /* now, compute numeric local P^T*A*P */
62   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
63   ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
64   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
65 
66   PetscFunctionReturn(0);
67 }
68 
69 #undef __FUNCT__
70 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
71 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
72 {
73   PetscErrorCode       ierr;
74   Mat                  P_loc,P_oth,B_mpi;
75   Mat_MatMatMultMPI    *ap;
76   PetscObjectContainer container;
77   FreeSpaceList        free_space=PETSC_NULL,current_space=PETSC_NULL;
78   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
79   Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
80   Mat_SeqAIJ           *p_loc,*p_oth;
81   PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
82   PetscInt             *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,nnz;
83   PetscInt             nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
84   PetscInt             am=A->m,pN=P->N,pn=P->n;
85   PetscBT              lnkbt;
86   MPI_Comm             comm=A->comm;
87   PetscMPIInt          size,rank,tag,*len_si,*len_s,*len_ri;
88   PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
89   PetscInt             len,proc,*dnz,*onz,*owners;
90   PetscInt             nzi,*bi,*bj;
91   PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
92   MPI_Request          *swaits,*rwaits;
93   MPI_Status           *sstatus,rstatus;
94   Mat_Merge_SeqsToMPI  *merge;
95   PetscInt             *api,*apj,*Jptr,apnz,*prmap=p->garray,pon;
96   PetscMPIInt          j;
97 
98   PetscFunctionBegin;
99   ierr = PetscObjectQuery((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
100   if (container) {
101     ierr  = PetscObjectContainerGetPointer(container,(void **)&ap);CHKERRQ(ierr);
102   } else {
103     SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
104   }
105   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
106   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
107 
108   /* get data from P_loc and P_oth */
109   P_loc=ap->B_loc; P_oth=ap->B_oth;
110   p_loc=(Mat_SeqAIJ*)P_loc->data; p_oth=(Mat_SeqAIJ*)P_oth->data;
111   pi_loc=p_loc->i; pj_loc=p_loc->j; pi_oth=p_oth->i; pj_oth=p_oth->j;
112 
113   /* first, compute symbolic AP = A_loc*P */
114   /*--------------------------------------*/
115   ierr  = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
116   ap->abi = api;
117   api[0] = 0;
118 
119   /* create and initialize a linked list */
120   nlnk = pN+1;
121   ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
122 
123   /* Initial FreeSpace size is fill*nnz(A) */
124   ierr = GetMoreSpace((PetscInt)(fill*(adi[am]+aoi[am])),&free_space);CHKERRQ(ierr);
125   current_space = free_space;
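  /*
     Illustrative note on the accumulation below: for each row i of A, the column indices of
     the needed rows of P (local or off-processor) are merged into the sorted linked list lnk.
     PetscLLAdd() returns in nlnk the number of NEWLY added indices, so apnz ends up as the
     number of distinct columns in row i of A*P.  For example (made-up indices), merging
     {0,3,7} into an empty list gives nlnk = 3, then merging {3,5} gives nlnk = 1, so
     apnz = 4 for the distinct columns {0,3,5,7}.
  */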
126 
127   for (i=0;i<am;i++) {
128     apnz = 0;
129     /* diagonal portion of A */
130     nzi = adi[i+1] - adi[i];
131     for (j=0; j<nzi; j++){
132       row = *adj++;
133       pnz = pi_loc[row+1] - pi_loc[row];
134       Jptr  = pj_loc + pi_loc[row];
135       /* add non-zero cols of P into the sorted linked list lnk */
136       ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
137       apnz += nlnk;
138     }
139     /* off-diagonal portion of A */
140     nzi = aoi[i+1] - aoi[i];
141     for (j=0; j<nzi; j++){
142       row = *aoj++;
143       pnz = pi_oth[row+1] - pi_oth[row];
144       Jptr  = pj_oth + pi_oth[row];
145       ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
146       apnz += nlnk;
147     }
148 
149     api[i+1] = api[i] + apnz;
150     if (ap->abnz_max < apnz) ap->abnz_max = apnz;
151 
152     /* if free space is not available, double the total space in the list */
153     if (current_space->local_remaining<apnz) {
154       ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
155     }
156 
157     /* Copy data into free space, then initialize lnk */
158     ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
159     current_space->array           += apnz;
160     current_space->local_used      += apnz;
161     current_space->local_remaining -= apnz;
162   }
163   /* Allocate space for apj, initialize apj, and */
164   /* destroy list of free space and other temporary array(s) */
165   ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ap->abj);CHKERRQ(ierr);
166   apj = ap->abj;
167   ierr = MakeSpaceContiguous(&free_space,ap->abj);CHKERRQ(ierr);
168 
169   /* determine symbolic Co=(p->B)^T*AP - send to others */
170   /*----------------------------------------------------*/
171   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
172 
173   /* then, compute symbolic Co = (p->B)^T*AP */
174   pon = (p->B)->n; /* total num of rows to be sent to other processors
175                          >= (num of nonzero rows of C_seq) - pn */
176   ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
177   coi[0] = 0;
178 
179   /* set initial free space to be 3*pon*max( nnz(AP) per row) */
180   nnz           = 3*pon*ap->abnz_max + 1;
181   ierr          = GetMoreSpace(nnz,&free_space);CHKERRQ(ierr);
182   current_space = free_space;
183 
184   for (i=0; i<pon; i++) {
185     nnz  = 0;
186     pnz = poti[i+1] - poti[i];
187     j     = pnz;
188     ptJ   = potj + poti[i+1];
189     while (j){/* assume cols are nearly in increasing order; starting from the end saves computation */
190       j--; ptJ--;
191       row  = *ptJ; /* row of AP == col of Pot */
192       apnz = api[row+1] - api[row];
193       Jptr   = apj + api[row];
194       /* add non-zero cols of AP into the sorted linked list lnk */
195       ierr = PetscLLAdd(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
196       nnz += nlnk;
197     }
198 
199     /* If free space is not available, double the total space in the list */
200     if (current_space->local_remaining<nnz) {
201       ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
202     }
203 
204     /* Copy data into free space, then initialize lnk */
205     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
206     current_space->array           += nnz;
207     current_space->local_used      += nnz;
208     current_space->local_remaining -= nnz;
209     coi[i+1] = coi[i] + nnz;
210   }
211   ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
212   ierr = MakeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
213   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
214 
215   /* send j-array (coj) of Co to other processors */
216   /*----------------------------------------------*/
217   /* determine row ownership */
218   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
219   ierr = PetscMemzero(merge,sizeof(Mat_Merge_SeqsToMPI));CHKERRQ(ierr);
220   ierr = PetscMapCreate(comm,&merge->rowmap);CHKERRQ(ierr);
221   ierr = PetscMapSetLocalSize(merge->rowmap,pn);CHKERRQ(ierr);
222   ierr = PetscMapSetType(merge->rowmap,MAP_MPI);CHKERRQ(ierr);
223   ierr = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);
224 
225   /* determine the number of messages to send, their lengths */
226   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
227   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
228   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
229   len_s = merge->len_s;
230   merge->nsend = 0;
231 
232   ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
233   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
234 
235   proc = 0;
236   for (i=0; i<pon; i++){
237     while (prmap[i] >= owners[proc+1]) proc++;
238     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
239     len_s[proc] += coi[i+1] - coi[i];
240   }
241 
242   len   = 0;  /* total length of buf_s[], the concatenated outgoing i-structure messages */
243   owners_co[0] = 0;
244   for (proc=0; proc<size; proc++){
245     owners_co[proc+1] = owners_co[proc] + len_si[proc];
246     if (len_si[proc]){
247       merge->nsend++;
248       len_si[proc] = 2*(len_si[proc] + 1);
249       len += len_si[proc];
250     }
251   }
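  /* Sanity check of the message length (illustrative): each i-structure message carries
     1 (nrows) + nrows (row indices) + nrows+1 (i-structure offsets) = 2*(nrows+1) integers,
     which is exactly the value 2*(len_si[proc]+1) assigned above. */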
252 
253   /* determine the number and length of messages to receive for coi and coj  */
254   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
255   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
256 
257   /* post the Irecv and Isend of coj */
258   ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&tag);CHKERRQ(ierr);
259   ierr = PetscPostIrecvInt(comm,tag,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
260 
261   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
262 
263   for (proc=0, k=0; proc<size; proc++){
264     if (!len_s[proc]) continue;
265     i = owners_co[proc];
266     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tag,comm,swaits+k);CHKERRQ(ierr);
267     k++;
268   }
269 
270   /* receives and sends of coj are complete */
271   ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
272   i = merge->nrecv;
273   while (i--) {
274     ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
275   }
276   ierr = PetscFree(rwaits);CHKERRQ(ierr);
277   if (merge->nsend){
278     ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);
279   }
280 
281   /* send and recv coi */
282   /*-------------------*/
283   ierr = PetscPostIrecvInt(comm,tag,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
284 
285   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
286   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
287   for (proc=0,k=0; proc<size; proc++){
288     if (!len_s[proc]) continue;
289     /* form outgoing message for i-structure:
290          buf_si[0]:                 nrows to be sent
291                [1:nrows]:           row index (local to the receiving processor)
292                [nrows+1:2*nrows+1]: i-structure index
293     */
294     /*-------------------------------------------*/
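    /* Worked example with made-up numbers: if 2 rows of Co go to [proc], with local row
       indices 5 and 9 and 3 and 4 nonzeros respectively, the outgoing i-structure message is
         buf_si = [2, 5, 9, 0, 3, 7]
       i.e. nrows, the two local row indices, then the running nonzero counts 0, 3, 3+4. */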
295     nrows = len_si[proc]/2 - 1;
296     buf_si_i    = buf_si + nrows+1;
297     buf_si[0]   = nrows;
298     buf_si_i[0] = 0;
299     nrows = 0;
300     for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
301       nzi = coi[i+1] - coi[i];
302       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
303       buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
304       nrows++;
305     }
306     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tag,comm,swaits+k);CHKERRQ(ierr);
307     k++;
308     buf_si += len_si[proc];
309   }
310   i = merge->nrecv;
311   while (i--) {
312     ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
313   }
314   ierr = PetscFree(rwaits);CHKERRQ(ierr);
315   if (merge->nsend){
316     ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);
317   }
318 
319   ierr = PetscLogInfo((PetscObject)A,"MatMerge_SeqsToMPI: nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
320   for (i=0; i<merge->nrecv; i++){
321     ierr = PetscLogInfo((PetscObject)A,"MatMerge_SeqsToMPI:   recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
322   }
323 
324   ierr = PetscFree(len_si);CHKERRQ(ierr);
325   ierr = PetscFree(len_ri);CHKERRQ(ierr);
326   ierr = PetscFree(swaits);CHKERRQ(ierr);
327   ierr = PetscFree(sstatus);CHKERRQ(ierr);
328   ierr = PetscFree(buf_s);CHKERRQ(ierr);
329 
330   /* compute the local portion of C (mpi mat) */
331   /*------------------------------------------*/
332   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
333 
334   /* allocate bi array and free space for accumulating nonzero column info */
335   ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
336   bi[0] = 0;
337 
338   /* set initial free space to be 3*pn*max( nnz(AP) per row) */
339   nnz           = 3*pn*ap->abnz_max + 1;
340   ierr          = GetMoreSpace(nnz,&free_space);CHKERRQ(ierr);
341   current_space = free_space;
342 
343   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
344   nextrow = buf_ri_k + merge->nrecv;
345   nextci  = nextrow + merge->nrecv;
346   for (k=0; k<merge->nrecv; k++){
347     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
348     nrows       = *buf_ri_k[k];
349     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
350     nextci[k]   = buf_ri_k[k] + (nrows + 1);/* points to the i-structure of the k-th received message */
351   }
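  /* Illustrative note: each received message has the layout produced above
     ([nrows, local row indices, i-structure offsets]).  nextrow[k] and nextci[k] walk
     message k in step with the local row loop below, so a hit (i == *nextrow[k]) means
     message k contributes *(nextci[k]+1) - *nextci[k] column indices to local row i. */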
352   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
353   for (i=0; i<pn; i++) {
354     /* add pdt[i,:]*AP into lnk */
355     nnz = 0;
356     pnz  = pdti[i+1] - pdti[i];
357     j    = pnz;
358     ptJ  = pdtj + pdti[i+1];
359     while (j){/* assume cols are nearly in increasing order; starting from the end saves computation */
360       j--; ptJ--;
361       row  = *ptJ; /* row of AP == col of Pt */
362       apnz = api[row+1] - api[row];
363       Jptr   = apj + api[row];
364       /* add non-zero cols of AP into the sorted linked list lnk */
365       ierr = PetscLLAdd(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
366       nnz += nlnk;
367     }
368     /* add received col data into lnk */
369     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
370       if (i == *nextrow[k]) { /* i-th row */
371         nzi = *(nextci[k]+1) - *nextci[k];
372         Jptr  = buf_rj[k] + *nextci[k];
373         ierr = PetscLLAdd(nzi,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
374         nnz += nlnk;
375         nextrow[k]++; nextci[k]++;
376       }
377     }
378 
379     /* if free space is not available, make more free space */
380     if (current_space->local_remaining<nnz) {
381       ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
382     }
383     /* copy data into free space, then initialize lnk */
384     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
385     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
386     current_space->array           += nnz;
387     current_space->local_used      += nnz;
388     current_space->local_remaining -= nnz;
389     bi[i+1] = bi[i] + nnz;
390   }
391   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
392   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
393 
394   ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
395   ierr = MakeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
396   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
397 
398   /* create symbolic parallel matrix B_mpi */
399   /*---------------------------------------*/
400   ierr = MatCreate(comm,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE,&B_mpi);CHKERRQ(ierr);
401   ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
402   ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
403   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
404 
405   /* B_mpi is not ready for use - assembly will be done by MatPtAPNumeric() */
406   B_mpi->assembled     = PETSC_FALSE;
407   B_mpi->ops->destroy  = MatDestroy_MPIAIJ_SeqsToMPI;
408   merge->bi            = bi;
409   merge->bj            = bj;
410   merge->coi           = coi;
411   merge->coj           = coj;
412   merge->buf_ri        = buf_ri;
413   merge->buf_rj        = buf_rj;
414   merge->owners_co     = owners_co;
415 
416   /* attach the supporting struct to B_mpi for reuse */
417   ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
418   ierr = PetscObjectContainerSetPointer(container,merge);CHKERRQ(ierr);
419   ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
420   *C = B_mpi;
421 
422   PetscFunctionReturn(0);
423 }
424 
425 #undef __FUNCT__
426 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
427 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
428 {
429   PetscErrorCode       ierr;
430   Mat_Merge_SeqsToMPI  *merge;
431   Mat_MatMatMultMPI    *ap;
432   PetscObjectContainer cont_merge,cont_ptap;
433   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
434   Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
435   Mat_SeqAIJ     *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
436   Mat_SeqAIJ     *p_loc,*p_oth;
437   PetscInt       *adi=ad->i,*aoi=ao->i,*adj=ad->j,*aoj=ao->j,*apJ,nextp,flops=0;
438   PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
439   PetscInt       i,j,k,anz,pnz,apnz,nextap,row,*cj;
440   MatScalar      *ada=ad->a,*aoa=ao->a,*apa,*pa,*ca,*pa_loc,*pa_oth;
441   PetscInt       am=A->m,cm=C->m,pon=(p->B)->n;
442   MPI_Comm       comm=C->comm;
443   PetscMPIInt    size,rank,taga,*len_s;
444   PetscInt       *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
445   PetscInt       **buf_ri,**buf_rj;
446   PetscInt       cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
447   MPI_Request    *s_waits,*r_waits;
448   MPI_Status     *status;
449   MatScalar      **abuf_r,*ba_i,*pA,*coa,*ba;
450   PetscInt       *api,*apj,*coi,*coj;
451   PetscInt       *poJ=po->j,*pdJ=pd->j,pcstart=p->cstart,pcend=p->cend;
452 
453   PetscFunctionBegin;
454   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
455   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
456 
457   ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&cont_merge);CHKERRQ(ierr);
458   if (cont_merge) {
459     ierr  = PetscObjectContainerGetPointer(cont_merge,(void **)&merge);CHKERRQ(ierr);
460   } else {
461     SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix C does not possess an object container");
462   }
463   ierr = PetscObjectQuery((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject *)&cont_ptap);CHKERRQ(ierr);
464   if (cont_ptap) {
465     ierr  = PetscObjectContainerGetPointer(cont_ptap,(void **)&ap);CHKERRQ(ierr);
466   } else {
467     SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
468   }
469 
470   /* get data from symbolic products */
471   p_loc = (Mat_SeqAIJ*)(ap->B_loc)->data;
472   p_oth = (Mat_SeqAIJ*)(ap->B_oth)->data;
473   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a; pA=pa_loc;
474   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
475 
476   coi = merge->coi; coj = merge->coj;
477   ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
478   ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);
479 
480   bi = merge->bi; bj = merge->bj;
481   ierr  = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);
482   ierr  = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
483   ierr  = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
484 
485   /* get data from symbolic A*P */
486   ierr = PetscMalloc((ap->abnz_max+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
487   ierr = PetscMemzero(apa,ap->abnz_max*sizeof(MatScalar));CHKERRQ(ierr);
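  /* Illustrative note: apa is a scratch array for the nonzero VALUES of the current row of
     A*P, stored compactly in the same order as its column indices apJ = apj + api[i].
     E.g. if row i of A*P has columns {2,6,9}, apa[0..2] accumulate the values for columns
     2, 6 and 9; apa is zeroed again at the end of each row. */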
488 
489   /* compute numeric C_seq=P_loc^T*A_loc*P */
490   api = ap->abi; apj = ap->abj;
491   for (i=0;i<am;i++) {
492     /* form i-th sparse row of A*P */
493     apnz = api[i+1] - api[i];
494     apJ  = apj + api[i];
495     /* diagonal portion of A */
496     anz  = adi[i+1] - adi[i];
497     for (j=0;j<anz;j++) {
498       row = *adj++;
499       pnz = pi_loc[row+1] - pi_loc[row];
500       pj  = pj_loc + pi_loc[row];
501       pa  = pa_loc + pi_loc[row];
502       nextp = 0;
503       for (k=0; nextp<pnz; k++) {
504         if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
505           apa[k] += (*ada)*pa[nextp++];
506         }
507       }
508       flops += 2*pnz;
509       ada++;
510     }
511     /* off-diagonal portion of A */
512     anz  = aoi[i+1] - aoi[i];
513     for (j=0; j<anz; j++) {
514       row = *aoj++;
515       pnz = pi_oth[row+1] - pi_oth[row];
516       pj  = pj_oth + pi_oth[row];
517       pa  = pa_oth + pi_oth[row];
518       nextp = 0;
519       for (k=0; nextp<pnz; k++) {
520         if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
521           apa[k] += (*aoa)*pa[nextp++];
522         }
523       }
524       flops += 2*pnz;
525       aoa++;
526     }
527 
528     /* Compute P_loc[i,:]^T*AP[i,:] using outer product */
529     pnz = pi_loc[i+1] - pi_loc[i];
530     for (j=0; j<pnz; j++) {
531       nextap = 0;
532       row    = *pJ++; /* global index */
533       if (row < pcstart || row >=pcend) { /* put the value into Co */
534         cj  = coj + coi[*poJ];
535         ca  = coa + coi[*poJ++];
536       } else {                            /* put the value into Cd */
537         cj   = bj + bi[*pdJ];
538         ca   = ba + bi[*pdJ++];
539       }
540       for (k=0; nextap<apnz; k++) {
541         if (cj[k]==apJ[nextap]) ca[k] += (*pA)*apa[nextap++];
542       }
543       flops += 2*apnz;
544       pA++;
545     }
546 
547     /* zero the current row info for A*P */
548     ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
549   }
550   ierr = PetscFree(apa);CHKERRQ(ierr);
551 
552   /* send and recv matrix values */
553   /*-----------------------------*/
554   buf_ri = merge->buf_ri;
555   buf_rj = merge->buf_rj;
556   len_s  = merge->len_s;
557   ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&taga);CHKERRQ(ierr);
558   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
559 
560   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);CHKERRQ(ierr);
561   for (proc=0,k=0; proc<size; proc++){
562     if (!len_s[proc]) continue;
563     i = merge->owners_co[proc];
564     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
565     k++;
566   }
567   ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
568   ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);
569   ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);
570   ierr = PetscFree(status);CHKERRQ(ierr);
571 
572   ierr = PetscFree(s_waits);CHKERRQ(ierr);
573   ierr = PetscFree(r_waits);CHKERRQ(ierr);
574   ierr = PetscFree(coa);CHKERRQ(ierr);
575 
576   /* insert local and received values into C */
577   /*-----------------------------------------*/
578   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
579   nextrow   = buf_ri_k + merge->nrecv;
580   nextci = nextrow + merge->nrecv;
581 
582   for (k=0; k<merge->nrecv; k++){
583     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
584     nrows       = *(buf_ri_k[k]);
585     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of the k-th received i-structure */
586     nextci[k]   = buf_ri_k[k] + (nrows + 1);/* points to the i-structure of the k-th received message */
587   }
588 
589   for (i=0; i<cm; i++) {
590     row = owners[rank] + i; /* global row index of C_seq */
591     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
592     ba_i = ba + bi[i];
593     bnz  = bi[i+1] - bi[i];
594     /* add received vals into ba */
595     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
596       /* i-th row */
597       if (i == *nextrow[k]) {
598         cnz = *(nextci[k]+1) - *nextci[k];
599         cj  = buf_rj[k] + *(nextci[k]);
600         ca  = abuf_r[k] + *(nextci[k]);
601         nextcj = 0;
602         for (j=0; nextcj<cnz; j++){
603           if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
604             ba_i[j] += ca[nextcj++];
605           }
606         }
607         nextrow[k]++; nextci[k]++;
608       }
609     }
610     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
611     flops += 2*cnz;
612   }
613   ierr = MatSetOption(C,MAT_COLUMNS_SORTED);CHKERRQ(ierr); /* does this cause a delay? */
614   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
615   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
616 
617   ierr = PetscFree(ba);CHKERRQ(ierr);
618   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
619   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
620   ierr = PetscLogFlops(flops);CHKERRQ(ierr);
621 
622   PetscFunctionReturn(0);
623 }
624