/* xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision df4be7eeb7111d94d8a5a571d7e69f9c5048ca1d) */

/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/
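
/*
  A typical calling sequence (a minimal sketch; matrix setup and error
  handling elided):

    Mat A,P,C;
    // ... create and assemble MPIAIJ matrices A (n x n) and P (n x m) ...
    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr); // symbolic + numeric, fill estimate 2.0
    // ... change the numerical values of A, keeping its nonzero pattern ...
    ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);   // reuse the symbolic data cached in C

  MatPtAP() dispatches to the MPIAIJ-specific symbolic/numeric kernels below.
*/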

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */
14 
15 PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16 {
17   PetscErrorCode    ierr;
18   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
19   Mat_PtAPMPI       *ptap=a->ptap;
20   PetscBool         iascii;
21   PetscViewerFormat format;
22 
23   PetscFunctionBegin;
24   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
25   if (iascii) {
26     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
27     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
28       if (ptap->algType == 0) {
29         ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
30       } else if (ptap->algType == 1) {
31         ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
32       }
33     }
34   }
35   ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
36   PetscFunctionReturn(0);
37 }

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
    if (ptap->AP_loc) { /* used by alg_rap */
      Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
      ierr = PetscFree(ap->i);CHKERRQ(ierr);
      ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
      ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
    } else { /* used by alg_ptap */
      ierr = PetscFree(ptap->api);CHKERRQ(ierr);
      ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
    }
    ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

    if (merge) { /* used by alg_ptap */
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }

    ierr = ptap->destroy(A);CHKERRQ(ierr);
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char          *algTypes[2] = {"scalable","nonscalable"};
  PetscInt            nalg=2;
#else
  const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt            nalg=3;
#endif
  PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
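    /* e.g. run with '-matptap_via scalable' (or 'nonscalable', or 'hypre' when
       PETSc is configured with hypre) to select the algorithm explicitly */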

    if (!flg && pN > 100000) { /* may switch to the scalable algorithm as default */
      MatInfo     Ainfo,Pinfo;
      PetscInt    nz_local;
      PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }
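    /* A worked example of the heuristic above (hypothetical sizes): with pN = 2e5
       global columns in P, fill = 2.0, and nz_local = 9e4 allocated nonzeros on
       some process, pN > fill*nz_local (2e5 > 1.8e5) holds there, so the MPI_LOR
       reduction switches every process to alg = 0. */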

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* Use boomerAMGBuildCoarseOperator */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api   = ap->i;
  apj   = ap->j;
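  /* The sparsity pattern api/apj of AP_loc was fixed by the symbolic phase;
     each pass of the loop below only fills in values, accumulating
     AP[i,:] = Ad[i,:]*P_loc + Ao[i,:]*P_oth into apa = ap->a + api[i].
     The workspace per row is O(nnz(AP[i,:])) rather than O(pN), which is what
     makes this variant memory-scalable. */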
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    apnz = api[i+1] - api[i];
    apa = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE */
  /* and when there are no off-processor parts. */
  /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in */
  /* MatSetValues_MPIAIJ_CopyFromCSRFormat no longer holds. Then the more general function MatSetValues_MPIAIJ() */
  /* has to be used, where the column index is looked up from a table, among other extra bookkeeping. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi,P_loc,P_oth;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
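
  /* A small worked example of the row merge below (hypothetical data): if
     Ad[i,:] has nonzero columns {2,5}, row 2 of P_loc has columns {0,3}, and
     row 5 has columns {3,7}, the sorted linked list accumulates the union
     {0,3,7}, giving apnz = 3 for that row of AP. */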
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until the receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
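    /* Worked example (hypothetical): sending nrows = 2 rows with local row
       indices {4,7} holding 3 and 1 nonzeros gives
       buf_si = {2, 4, 7, 0, 3, 4}, of length 2*(nrows+1) = 6 = len_si[proc]. */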
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* free the communication buffers that are no longer needed */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
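  /* Unlike the scalable variant, this condensed linked list also carries a
     PetscBT bitmap (lnkbt) of length pN, the global number of columns of P,
     so per-process memory grows with the global problem size - hence
     "nonscalable". */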

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until the receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* free the communication buffers that are no longer needed */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
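  /* apa is a dense accumulator of global length pN used by the numeric phase;
     it is the O(pN) per-process workspace that the scalable variant avoids. */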

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa   = ptap->apa;
  api   = ap->i;
  apj   = ap->j;
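  /* AProw_nonscalable() scatters the row product into the dense accumulator
     apa (length pN); the inner loop below then gathers the apnz nonzeros back
     out through the column indices apj and re-zeroes only the touched entries.
     E.g. (hypothetical row) apj = {0,3,7} picks apa[0],apa[3],apa[7] into
     ap->a and resets them to 0.0. */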
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE */
  /* and when there are no off-processor parts. */
  /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in */
  /* MatSetValues_MPIAIJ_CopyFromCSRFormat no longer holds. Then the more general function MatSetValues_MPIAIJ() */
  /* has to be used, where the column index is looked up from a table, among other extra bookkeeping. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}
1062