xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision 2bf68e3e0f2a61f71e7c65bee250bfa1c8ce0cdb)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
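/*
  Illustrative usage (a sketch, not part of this file): applications reach the
  routines below through the public MatPtAP() interface; the matrices A, P, and
  C here are hypothetical.

     Mat A,P,C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  - first call: symbolic + numeric
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    - later calls: numeric phase only
*/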
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 /* #define PTAP_PROFILE */
14 
15 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
16 #undef __FUNCT__
17 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
18 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19 {
20   PetscErrorCode ierr;
21   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
22   Mat_PtAPMPI    *ptap=a->ptap;
23 
24   PetscFunctionBegin;
25   if (ptap) {
26     Mat_Merge_SeqsToMPI *merge=ptap->merge;
27     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
28     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
29     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
32     ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
33     ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
34     if (ptap->AP_loc) { /* used by alg_rap */
35       Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
36       ierr = PetscFree(ap->i);CHKERRQ(ierr);
37       ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
38       ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
39     } else { /* used by alg_ptap */
40       ierr = PetscFree(ptap->api);CHKERRQ(ierr);
41       ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
42     }
43     ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
44     ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
45     if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
46 
47     if (merge) { /* used by alg_ptap */
48       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
49       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
50       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
51       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
52       ierr = PetscFree(merge->bj);CHKERRQ(ierr);
53       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
54       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
55       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
56       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
57       ierr = PetscFree(merge->coi);CHKERRQ(ierr);
58       ierr = PetscFree(merge->coj);CHKERRQ(ierr);
59       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
60       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
61       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
62     }
63 
64     ierr = ptap->destroy(A);CHKERRQ(ierr);
65     ierr = PetscFree(ptap);CHKERRQ(ierr);
66   }
67   PetscFunctionReturn(0);
68 }
69 
70 #undef __FUNCT__
71 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
72 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
73 {
74   PetscErrorCode ierr;
75   Mat_MPIAIJ     *a     = (Mat_MPIAIJ*)A->data;
76   Mat_PtAPMPI    *ptap  = a->ptap;
77 
78   PetscFunctionBegin;
79   ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
80   (*M)->ops->destroy   = ptap->destroy;
81   (*M)->ops->duplicate = ptap->duplicate;
82   PetscFunctionReturn(0);
83 }
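/*
  The two routines above are interposers: the symbolic routines below save the
  original function pointers in ptap->destroy and ptap->duplicate before
  installing MatDestroy_MPIAIJ_PtAP and MatDuplicate_MPIAIJ_MatPtAP in their
  place, so destroying or duplicating C first handles the Mat_PtAPMPI support
  data and then chains to the saved pointer.  A minimal sketch of the pattern:

     ptap->destroy      = Cmpi->ops->destroy;      - save the original
     Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;  - interpose the wrapper
     ...
     ierr = ptap->destroy(A);CHKERRQ(ierr);        - chain from inside the wrapper
*/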
84 
85 #undef __FUNCT__
86 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
87 PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
88 {
89   PetscErrorCode ierr;
90   PetscBool      rap=PETSC_TRUE; /* do R=P^T locally, then C=R*A*P */
91   MPI_Comm       comm;
92 
93   PetscFunctionBegin;
94   /* check if matrix local sizes are compatible */
95   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
96   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
97   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
98 
99   ierr = PetscOptionsGetBool(NULL,NULL,"-matrap",&rap,NULL);CHKERRQ(ierr);
100   if (scall == MAT_INITIAL_MATRIX) {
101     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
102     if (rap) { /* do R=P^T locally, then C=R*A*P */
103       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
104     } else {       /* do P^T*A*P */
105       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(A,P,fill,C);CHKERRQ(ierr);
106     }
107     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
108   }
109   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
110   ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
111   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
112   PetscFunctionReturn(0);
113 }
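/*
  Illustrative command lines (the executable name ./app is hypothetical) for
  the runtime options consumed above and in the symbolic routine below:

     mpiexec -n 4 ./app -matrap 0                  - use the P^T*(A*P) (alg_ptap) path instead of R*A*P
     mpiexec -n 4 ./app -matptap_via nonscalable   - select among algTypes[] in the R*A*P symbolic phase
*/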
114 
115 #if defined(PETSC_HAVE_HYPRE)
116 PETSC_INTERN PetscErrorCode MatPtAPSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
117 #endif
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
121 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
122 {
123   PetscErrorCode      ierr;
124   Mat_PtAPMPI         *ptap;
125   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
126   MPI_Comm            comm;
127   PetscMPIInt         size,rank;
128   Mat                 Cmpi;
129   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
130   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
131   PetscInt            *lnk,i,k,pnz,row,nsend;
132   PetscBT             lnkbt;
133   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
134   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
135   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
136   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
137   MPI_Request         *swaits,*rwaits;
138   MPI_Status          *sstatus,rstatus;
139   PetscLayout         rowmap;
140   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
141   PetscMPIInt         *len_r,*id_r;    /* array of length of comm->size, store send/recv matrix values */
142   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
143   Mat_SeqAIJ          *p_loc,*p_oth,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*c_loc,*c_oth;
144   PetscScalar         *apv;
145   PetscTable          ta;
146 #if defined(PETSC_HAVE_HYPRE)
147   const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
148   PetscInt            nalg = 3;
149 #else
150   const char          *algTypes[2] = {"scalable","nonscalable"};
151   PetscInt            nalg = 2;
152 #endif
153   PetscInt            alg = 1; /* set default algorithm */
154 #if defined(PETSC_USE_INFO)
155   PetscReal           apfill;
156 #endif
157 #if defined(PTAP_PROFILE)
158   PetscLogDouble      t0,t1,t11,t12,t2,t3,t4;
159 #endif
160 
161   PetscFunctionBegin;
162   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
163   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
164   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
165 
166   /* create symbolic parallel matrix Cmpi */
167   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
168   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
169 
170   ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
171   ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[1],&alg,NULL);CHKERRQ(ierr);
172   ierr = PetscOptionsEnd();CHKERRQ(ierr);
173 
174   if (alg == 1) {
175     /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
176     Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
177   } else if (alg == 0) {
178     /* the scalable algorithm is not implemented yet */
179     SETERRQ(comm,PETSC_ERR_SUP,"Scalable PtAP algorithm is not implemented yet");
180 #if defined(PETSC_HAVE_HYPRE)
181   } else if (alg == 2) {
182     /* Use boomerAMGBuildCoarseOperator */
183     ierr = MatDestroy(&Cmpi);CHKERRQ(ierr);
184     ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
185     PetscFunctionReturn(0);
186 #endif
187   } else {
188     SETERRQ1(comm,PETSC_ERR_PLIB,"Unknown algorithm number %D",alg);
189   }
190 
191 #if defined(PTAP_PROFILE)
192   ierr = PetscTime(&t0);CHKERRQ(ierr);
193 #endif
194 
195   /* create struct Mat_PtAPMPI and attach it to C later */
196   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
197   ptap->reuse = MAT_INITIAL_MATRIX;
198 
199   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
200   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
201   /* get P_loc by taking all local rows of P */
202   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
203 
204   /* (0) compute Rd = Pd^T, Ro = Po^T  */
205   /* --------------------------------- */
206   ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
207   ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
208 #if defined(PTAP_PROFILE)
209   ierr = PetscTime(&t11);CHKERRQ(ierr);
210 #endif
211 
212   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
213   /* ----------------------------------------------------------------- */
214   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
215   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
216 
217   /* create and initialize a linked list */
218   ierr = PetscTableCreate(pN,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
219   MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
220   MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
221   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
223 
224   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
225 
226   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
227   ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
228   current_space = free_space;
229   nspacedouble  = 0;
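  /* e.g., with illustrative numbers: for nnz(Ad)+nnz(Ao) = 1000, nnz(P_loc) = 500,
     and fill = 2.0, the initial buffer holds 2.0*(1000+500) = 3000 column indices;
     whenever a row overflows it, PetscFreeSpaceGet() roughly doubles the total and
     nspacedouble counts these reallocations */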
230 
231   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
232   api[0] = 0;
233   for (i=0; i<am; i++) {
234     /* diagonal portion: Ad[i,:]*P */
235     ai = ad->i; pi = p_loc->i;
236     nzi = ai[i+1] - ai[i];
237     aj  = ad->j + ai[i];
238     for (j=0; j<nzi; j++) {
239       row  = aj[j];
240       pnz  = pi[row+1] - pi[row];
241       Jptr = p_loc->j + pi[row];
242       /* add non-zero cols of P into the sorted linked list lnk */
243       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
244     }
245     /* off-diagonal portion: Ao[i,:]*P */
246     ai = ao->i; pi = p_oth->i;
247     nzi = ai[i+1] - ai[i];
248     aj  = ao->j + ai[i];
249     for (j=0; j<nzi; j++) {
250       row  = aj[j];
251       pnz  = pi[row+1] - pi[row];
252       Jptr = p_oth->j + pi[row];
253       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
254     }
255     apnz     = lnk[0];
256     api[i+1] = api[i] + apnz;
257     if (ap_rmax < apnz) ap_rmax = apnz;
258 
259     /* if free space is not available, double the total space in the list */
260     if (current_space->local_remaining<apnz) {
261       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
262       nspacedouble++;
263     }
264 
265     /* Copy data into free space, then initialize lnk */
266     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
267 
268     current_space->array           += apnz;
269     current_space->local_used      += apnz;
270     current_space->local_remaining -= apnz;
271   }
272   /* Allocate space for apj and apv, initialize apj, and */
273   /* destroy list of free space and other temporary array(s) */
274   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
275   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
276   ierr   = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
277 
278   /* Create AP_loc for reuse */
279   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
280 
281 #if defined(PETSC_USE_INFO)
282   apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
283   ptap->AP_loc->info.mallocs           = nspacedouble;
284   ptap->AP_loc->info.fill_ratio_given  = fill;
285   ptap->AP_loc->info.fill_ratio_needed = apfill;
286 
287   if (api[am]) {
288     ierr = PetscInfo3(ptap->AP_loc,"AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
289     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
290   } else {
291     ierr = PetscInfo(ptap->AP_loc,"AP_loc is empty\n");CHKERRQ(ierr);
292   }
293 #endif
294 
295 #if defined(PTAP_PROFILE)
296   ierr = PetscTime(&t12);CHKERRQ(ierr);
297 #endif
298 
299   /* (2-1) compute symbolic Co = Ro*AP_loc  */
300   /* ------------------------------------ */
301   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
302 #if defined(PTAP_PROFILE)
303   ierr = PetscTime(&t1);CHKERRQ(ierr);
304 #endif
305 
306   /* (3) send coj of C_oth to other processors  */
307   /* ------------------------------------------ */
308   /* determine row ownership */
309   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
310   rowmap->n  = pn;
311   rowmap->bs = 1;
312   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
313   owners = rowmap->range;
314 
315   /* determine the number of messages to send, their lengths */
316   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
317   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
318   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
319 
320   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
321   coi   = c_oth->i; coj = c_oth->j;
322   con   = ptap->C_oth->rmap->n;
323   proc  = 0;
324   for (i=0; i<con; i++) {
325     while (prmap[i] >= owners[proc+1]) proc++;
326     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
327     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
328   }
329 
330   len          = 0; /* max length of buf_si[], see (4) */
331   owners_co[0] = 0;
332   nsend        = 0;
333   for (proc=0; proc<size; proc++) {
334     owners_co[proc+1] = owners_co[proc] + len_si[proc];
335     if (len_s[proc]) {
336       nsend++;
337       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
338       len         += len_si[proc];
339     }
340   }
341 
342   /* determine the number and length of messages to receive for coi and coj  */
343   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
344   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
345 
346   /* post the Irecv and Isend of coj */
347   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
348   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
349   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
350   for (proc=0, k=0; proc<size; proc++) {
351     if (!len_s[proc]) continue;
352     i    = owners_co[proc];
353     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
354     k++;
355   }
356 
357   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
358   /* ---------------------------------------- */
359   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
360   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
361 
362   /* receives of coj are complete */
363   for (i=0; i<nrecv; i++) {
364     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
365   }
366   ierr = PetscFree(rwaits);CHKERRQ(ierr);
367   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
368 
369   /* add received column indices into ta to update Crmax */
370   for (k=0; k<nrecv; k++) {/* k-th received message */
371     Jptr = buf_rj[k];
372     for (j=0; j<len_r[k]; j++) {
373       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
374     }
375   }
376   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
377   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
378 
379   /* (4) send and recv coi */
380   /*-----------------------*/
381   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
382   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
383   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
384   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
385   for (proc=0,k=0; proc<size; proc++) {
386     if (!len_s[proc]) continue;
387     /* form outgoing message for i-structure:
388          buf_si[0]:                 nrows to be sent
389                [1:nrows]:           row index (local on destination)
390                [nrows+1:2*nrows+1]: i-structure index
391     */
392     /*-------------------------------------------*/
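    /* e.g., an illustrative message: sending nrows = 2 rows with local row indices
       3 and 7 that hold 4 and 6 nonzeros gives len_si[proc] = 2*(2+1) = 6 and
       buf_si = {2, 3, 7, 0, 4, 10} - the count, the row indices, then the running
       i-structure offsets */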
393     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
394     buf_si_i    = buf_si + nrows+1;
395     buf_si[0]   = nrows;
396     buf_si_i[0] = 0;
397     nrows       = 0;
398     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
399       nzi = coi[i+1] - coi[i];
400       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
401       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
402       nrows++;
403     }
404     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
405     k++;
406     buf_si += len_si[proc];
407   }
408   for (i=0; i<nrecv; i++) {
409     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
410   }
411   ierr = PetscFree(rwaits);CHKERRQ(ierr);
412   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
413 
414   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
415   ierr = PetscFree(len_ri);CHKERRQ(ierr);
416   ierr = PetscFree(swaits);CHKERRQ(ierr);
417   ierr = PetscFree(buf_s);CHKERRQ(ierr);
418 #if defined(PTAP_PROFILE)
419   ierr = PetscTime(&t2);CHKERRQ(ierr);
420 #endif
421   /* (5) compute the local portion of Cmpi      */
422   /* ------------------------------------------ */
423   /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
424   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
425   current_space = free_space;
426 
427   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
428   for (k=0; k<nrecv; k++) {
429     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
430     nrows       = *buf_ri_k[k];
431     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
432     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of k-th recved message  */
433   }
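  /* receive-side counterpart of the buf_si layout formed in step (4): for each
     message k, nextrow[k] walks the nrows row indices (local to this process)
     and nextci[k] walks the nrows+1 running offsets into buf_rj[k] */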
434 
435   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
436   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
437   for (i=0; i<pn; i++) {
438     /* add C_loc into Cmpi */
439     nzi  = c_loc->i[i+1] - c_loc->i[i];
440     Jptr = c_loc->j + c_loc->i[i];
441     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
442 
443     /* add received col data into lnk */
444     for (k=0; k<nrecv; k++) { /* k-th received message */
445       if (i == *nextrow[k]) { /* i-th row */
446         nzi  = *(nextci[k]+1) - *nextci[k];
447         Jptr = buf_rj[k] + *nextci[k];
448         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
449         nextrow[k]++; nextci[k]++;
450       }
451     }
452     nzi = lnk[0];
453 
454     /* copy data into free space, then initialize lnk */
455     ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
456     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
457   }
458   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
459   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
460   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
461 #if defined(PTAP_PROFILE)
462   ierr = PetscTime(&t3);CHKERRQ(ierr);
463 #endif
464 
465   /* local sizes and preallocation */
466   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
467   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
468   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
469   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
470 
471   /* members in merge */
472   ierr = PetscFree(id_r);CHKERRQ(ierr);
473   ierr = PetscFree(len_r);CHKERRQ(ierr);
474   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
475   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
476   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
477   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
478   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
479 
480   /* attach the supporting struct to Cmpi for reuse */
481   c = (Mat_MPIAIJ*)Cmpi->data;
482   c->ptap         = ptap;
483   ptap->duplicate = Cmpi->ops->duplicate;
484   ptap->destroy   = Cmpi->ops->destroy;
485 
486   if (alg == 1) {
487     ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
488   }
489 
490   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
491   Cmpi->assembled        = PETSC_FALSE;
492   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
493   Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
494   *C                     = Cmpi;
495 
496 #if defined(PTAP_PROFILE)
497   ierr = PetscTime(&t4);CHKERRQ(ierr);
498   if (rank == 1) {
499     ierr = PetscPrintf(PETSC_COMM_SELF,"PtAPSym: %g + %g + %g + %g + %g + %g = %g\n",t11-t0,t12-t11,t1-t12,t2-t1,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
500   }
501 #endif
502   PetscFunctionReturn(0);
503 }
504 
505 #undef __FUNCT__
506 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
507 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
508 {
509   PetscErrorCode    ierr;
510   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
511   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
512   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
513   Mat_PtAPMPI       *ptap = c->ptap;
514   Mat               AP_loc,C_loc,C_oth;
515   PetscInt          i,rstart,rend,cm,ncols,row;
516   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
517   PetscScalar       *apa;
518   const PetscInt    *cols;
519   const PetscScalar *vals;
520 #if defined(PTAP_PROFILE)
521   PetscMPIInt       rank;
522   MPI_Comm          comm;
523   PetscLogDouble    t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;
524 #endif
525 
526   PetscFunctionBegin;
527   ierr = MatZeroEntries(C);CHKERRQ(ierr);
528 
529   /* 1) get Rd = Pd^T, Ro = Po^T */
530 #if defined(PTAP_PROFILE)
531   ierr = PetscTime(&t0);CHKERRQ(ierr);
532 #endif
533   if (ptap->reuse == MAT_REUSE_MATRIX) {
534     ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
535     ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
536   }
537 #if defined(PTAP_PROFILE)
538   ierr = PetscTime(&t1);CHKERRQ(ierr);
539   eR = t1 - t0;
540 #endif
541 
542   /* 2) get AP_loc */
543   AP_loc = ptap->AP_loc;
544   ap = (Mat_SeqAIJ*)AP_loc->data;
545 
546   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
547   /*-----------------------------------------------------*/
548   if (ptap->reuse == MAT_REUSE_MATRIX) {
549     /* P_oth and P_loc were obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
550     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
551     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
552   }
553 
554   /* 2-2) compute numeric A_loc*P - dominating part */
555   /* ---------------------------------------------- */
556   /* get data from symbolic products */
557   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
558   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
559   apa   = ptap->apa;
560   api   = ap->i;
561   apj   = ap->j;
562   for (i=0; i<am; i++) {
563     /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
564     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
565     apnz = api[i+1] - api[i];
566     for (j=0; j<apnz; j++) {
567       col = apj[j+api[i]];
568       ap->a[j+ap->i[i]] = apa[col];
569       apa[col] = 0.0;
570     }
571     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
572   }
573 #if defined(PTAP_PROFILE)
574   ierr = PetscTime(&t2);CHKERRQ(ierr);
575   eAP = t2 - t1;
576 #endif
577 
578   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
579   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
580   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
581   C_loc = ptap->C_loc;
582   C_oth = ptap->C_oth;
583 #if defined(PTAP_PROFILE)
584   ierr = PetscTime(&t3);CHKERRQ(ierr);
585   eCseq = t3 - t2;
586 #endif
587 
588   /* add C_loc and C_oth into C */
589   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
590 
591   /* C_loc -> C */
592   cm    = C_loc->rmap->N;
593   c_seq = (Mat_SeqAIJ*)C_loc->data;
594   cols = c_seq->j;
595   vals = c_seq->a;
596   for (i=0; i<cm; i++) {
597     ncols = c_seq->i[i+1] - c_seq->i[i];
598     row = rstart + i;
599     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
600     cols += ncols; vals += ncols;
601   }
602 
603   /* Co -> C, off-processor part */
604   cm = C_oth->rmap->N;
605   c_seq = (Mat_SeqAIJ*)C_oth->data;
606   cols = c_seq->j;
607   vals = c_seq->a;
608   for (i=0; i<cm; i++) {
609     ncols = c_seq->i[i+1] - c_seq->i[i];
610     row = p->garray[i];
611     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
612     cols += ncols; vals += ncols;
613   }
614   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
615   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
616 #if defined(PTAP_PROFILE)
617   ierr = PetscTime(&t4);CHKERRQ(ierr);
618   eCmpi = t4 - t3;
619 
620   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
621   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
622   if (rank==1) {
623     ierr = PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);CHKERRQ(ierr);
624   }
625 #endif
626   ptap->reuse = MAT_REUSE_MATRIX;
627   PetscFunctionReturn(0);
628 }
629 
630 #undef __FUNCT__
631 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap"
632 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,PetscReal fill,Mat *C)
633 {
634   PetscErrorCode      ierr;
635   Mat                 Cmpi;
636   Mat_PtAPMPI         *ptap;
637   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
638   Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
639   Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
640   Mat_SeqAIJ          *p_loc,*p_oth;
641   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
642   PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
643   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
644   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
645   PetscBT             lnkbt;
646   MPI_Comm            comm;
647   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
648   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
649   PetscInt            len,proc,*dnz,*onz,*owners;
650   PetscInt            nzi,*pti,*ptj;
651   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
652   MPI_Request         *swaits,*rwaits;
653   MPI_Status          *sstatus,rstatus;
654   Mat_Merge_SeqsToMPI *merge;
655   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
656   PetscReal           afill=1.0,afill_tmp;
657 
658   PetscFunctionBegin;
659   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
660   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
661   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
662 
663   /* create struct Mat_PtAPMPI and attach it to C later */
664   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
665   ierr        = PetscNew(&merge);CHKERRQ(ierr);
666   ptap->merge = merge;
667   ptap->reuse = MAT_INITIAL_MATRIX;
668 
669   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
670   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
671 
672   /* get P_loc by taking all local rows of P */
673   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
674 
675   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
676   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
677   pi_loc = p_loc->i; pj_loc = p_loc->j;
678   pi_oth = p_oth->i; pj_oth = p_oth->j;
679 
680   /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
681   /*--------------------------------------------------------------------------*/
682   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
683   api[0] = 0;
684 
685   /* create and initialize a linked list */
686   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
687 
688   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this ran out of memory for ex56 with np=8k on Intrepid! */
689   ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
690   current_space = free_space;
691 
692   for (i=0; i<am; i++) {
693     /* diagonal portion of A */
694     nzi = adi[i+1] - adi[i];
695     aj  = ad->j + adi[i];
696     for (j=0; j<nzi; j++) {
697       row  = aj[j];
698       pnz  = pi_loc[row+1] - pi_loc[row];
699       Jptr = pj_loc + pi_loc[row];
700       /* add non-zero cols of P into the sorted linked list lnk */
701       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
702     }
703     /* off-diagonal portion of A */
704     nzi = aoi[i+1] - aoi[i];
705     aj  = ao->j + aoi[i];
706     for (j=0; j<nzi; j++) {
707       row  = aj[j];
708       pnz  = pi_oth[row+1] - pi_oth[row];
709       Jptr = pj_oth + pi_oth[row];
710       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
711     }
712     apnz     = lnk[0];
713     api[i+1] = api[i] + apnz;
714     if (ap_rmax < apnz) ap_rmax = apnz;
715 
716     /* if free space is not available, double the total space in the list */
717     if (current_space->local_remaining<apnz) {
718       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
719       nspacedouble++;
720     }
721 
722     /* Copy data into free space, then initialize lnk */
723     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
724 
725     current_space->array           += apnz;
726     current_space->local_used      += apnz;
727     current_space->local_remaining -= apnz;
728   }
729 
730   /* Allocate space for apj, initialize apj, and */
731   /* destroy list of free space and other temporary array(s) */
732   ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
733   ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
734   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
735   if (afill_tmp > afill) afill = afill_tmp;
736 
737   /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
738   /*-----------------------------------------------------------------*/
739   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
740 
741   /* then, compute symbolic Co = (p->B)^T*AP */
742   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
743                          >= (num of nonzero rows of C_seq) - pn */
744   ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
745   coi[0] = 0;
746 
747   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
748   nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],api[am]));
749   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
750   current_space = free_space;
751 
752   for (i=0; i<pon; i++) {
753     pnz = poti[i+1] - poti[i];
754     ptJ = potj + poti[i];
755     for (j=0; j<pnz; j++) {
756       row  = ptJ[j]; /* row of AP == col of Pot */
757       apnz = api[row+1] - api[row];
758       Jptr = apj + api[row];
759       /* add non-zero cols of AP into the sorted linked list lnk */
760       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
761     }
762     nnz = lnk[0];
763 
764     /* If free space is not available, double the total space in the list */
765     if (current_space->local_remaining<nnz) {
766       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
767       nspacedouble++;
768     }
769 
770     /* Copy data into free space, then initialize lnk */
771     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
772 
773     current_space->array           += nnz;
774     current_space->local_used      += nnz;
775     current_space->local_remaining -= nnz;
776 
777     coi[i+1] = coi[i] + nnz;
778   }
779 
780   ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
781   ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
782   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
783   if (afill_tmp > afill) afill = afill_tmp;
784   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
785 
786   /* (3) send j-array (coj) of Co to other processors */
787   /*--------------------------------------------------*/
788   ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
789   len_s        = merge->len_s;
790   merge->nsend = 0;
791 
792 
793   /* determine row ownership */
794   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
795   merge->rowmap->n  = pn;
796   merge->rowmap->bs = 1;
797 
798   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
799   owners = merge->rowmap->range;
800 
801   /* determine the number of messages to send, their lengths */
802   ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
803   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
804   ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
805 
806   proc = 0;
807   for (i=0; i<pon; i++) {
808     while (prmap[i] >= owners[proc+1]) proc++;
809     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
810     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
811   }
812 
813   len          = 0; /* max length of buf_si[], see (4) */
814   owners_co[0] = 0;
815   for (proc=0; proc<size; proc++) {
816     owners_co[proc+1] = owners_co[proc] + len_si[proc];
817     if (len_s[proc]) {
818       merge->nsend++;
819       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
820       len         += len_si[proc];
821     }
822   }
823 
824   /* determine the number and length of messages to receive for coi and coj  */
825   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
826   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
827 
828   /* post the Irecv and Isend of coj */
829   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
830   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
831   ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
832   for (proc=0, k=0; proc<size; proc++) {
833     if (!len_s[proc]) continue;
834     i    = owners_co[proc];
835     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
836     k++;
837   }
838 
839   /* receives and sends of coj are complete */
840   for (i=0; i<merge->nrecv; i++) {
841     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
842   }
843   ierr = PetscFree(rwaits);CHKERRQ(ierr);
844   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
845 
846   /* (4) send and recv coi */
847   /*-----------------------*/
848   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
849   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
850   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
851   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
852   for (proc=0,k=0; proc<size; proc++) {
853     if (!len_s[proc]) continue;
854     /* form outgoing message for i-structure:
855          buf_si[0]:                 nrows to be sent
856                [1:nrows]:           row index (local on destination)
857                [nrows+1:2*nrows+1]: i-structure index
858     */
859     /*-------------------------------------------*/
860     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
861     buf_si_i    = buf_si + nrows+1;
862     buf_si[0]   = nrows;
863     buf_si_i[0] = 0;
864     nrows       = 0;
865     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
866       nzi = coi[i+1] - coi[i];
867       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
868       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
869       nrows++;
870     }
871     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
872     k++;
873     buf_si += len_si[proc];
874   }
875   i = merge->nrecv;
876   while (i--) {
877     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
878   }
879   ierr = PetscFree(rwaits);CHKERRQ(ierr);
880   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
881 
882   ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
883   ierr = PetscFree(len_ri);CHKERRQ(ierr);
884   ierr = PetscFree(swaits);CHKERRQ(ierr);
885   ierr = PetscFree(buf_s);CHKERRQ(ierr);
886 
887   /* (5) compute the local portion of C (mpi mat) */
888   /*----------------------------------------------*/
889   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
890 
891   /* allocate pti array and free space for accumulating nonzero column info */
892   ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
893   pti[0] = 0;
894 
895   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
896   nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pi_loc[pm],api[am]));
897   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
898   current_space = free_space;
899 
900   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
901   for (k=0; k<merge->nrecv; k++) {
902     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
903     nrows       = *buf_ri_k[k];
904     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
905     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of k-th recved message  */
906   }
907   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
908 
909   for (i=0; i<pn; i++) {
910     /* add pdt[i,:]*AP into lnk */
911     pnz = pdti[i+1] - pdti[i];
912     ptJ = pdtj + pdti[i];
913     for (j=0; j<pnz; j++) {
914       row  = ptJ[j];  /* row of AP == col of Pt */
915       apnz = api[row+1] - api[row];
916       Jptr = apj + api[row];
917       /* add non-zero cols of AP into the sorted linked list lnk */
918       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
919     }
920 
921     /* add received col data into lnk */
922     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
923       if (i == *nextrow[k]) { /* i-th row */
924         nzi  = *(nextci[k]+1) - *nextci[k];
925         Jptr = buf_rj[k] + *nextci[k];
926         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
927         nextrow[k]++; nextci[k]++;
928       }
929     }
930     nnz = lnk[0];
931 
932     /* if free space is not available, make more free space */
933     if (current_space->local_remaining<nnz) {
934       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
935       nspacedouble++;
936     }
937     /* copy data into free space, then initialize lnk */
938     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
939     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
940 
941     current_space->array           += nnz;
942     current_space->local_used      += nnz;
943     current_space->local_remaining -= nnz;
944 
945     pti[i+1] = pti[i] + nnz;
946   }
947   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
948   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
949 
950   ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
951   ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
952   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
953   if (afill_tmp > afill) afill = afill_tmp;
954   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
955 
956   /* (6) create symbolic parallel matrix Cmpi */
957   /*------------------------------------------*/
958   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
959   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
960   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
961   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
962   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
963   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
964 
965   merge->bi        = pti;      /* Cseq->i */
966   merge->bj        = ptj;      /* Cseq->j */
967   merge->coi       = coi;      /* Co->i   */
968   merge->coj       = coj;      /* Co->j   */
969   merge->buf_ri    = buf_ri;
970   merge->buf_rj    = buf_rj;
971   merge->owners_co = owners_co;
972   merge->destroy   = Cmpi->ops->destroy;
973 
974   /* attach the supporting struct to Cmpi for reuse */
975   c           = (Mat_MPIAIJ*)Cmpi->data;
976   c->ptap     = ptap;
977   ptap->api   = api;
978   ptap->apj   = apj;
979   ptap->duplicate = Cmpi->ops->duplicate;
980   ptap->destroy   = Cmpi->ops->destroy;
981 
982   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
983   Cmpi->assembled        = PETSC_FALSE;
984   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
985   Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
986   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap;
987   *C                     = Cmpi;
988 
989   /* flag 'scalable' determines which implementation is used:
990        PETSC_FALSE: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
991        PETSC_TRUE:  do sparse axpy in MatPtAPNumeric() - may be slower, but uses a sparse array apa */
992   /* set default to nonscalable (see the memory sketch after the allocation below) */
993   ptap->scalable = PETSC_FALSE;
994 
995   ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->options,((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
996   if (!ptap->scalable) {  /* Do dense axpy */
997     ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
998   } else {
999     ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
1000   }
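  /* memory comparison with illustrative numbers: the dense apa has pN entries
     (for pN = 10^6 that is ~8 MB of scalars per process in double precision),
     while the sparse apa has only ap_rmax+1 entries - e.g. a few dozen for a
     stencil-based P - independent of the global number of columns of P */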
1001 
1002 #if defined(PETSC_USE_INFO)
1003   if (pti[pn] != 0) {
1004     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
1005     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
1006   } else {
1007     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
1008   }
1009 #endif
1010   PetscFunctionReturn(0);
1011 }
1012 
1013 #undef __FUNCT__
1014 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap"
1015 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,Mat C)
1016 {
1017   PetscErrorCode      ierr;
1018   Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1019   Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
1020   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1021   Mat_SeqAIJ          *p_loc,*p_oth;
1022   Mat_PtAPMPI         *ptap;
1023   Mat_Merge_SeqsToMPI *merge;
1024   PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
1025   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
1026   PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
1027   MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
1028   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1029   MPI_Comm            comm;
1030   PetscMPIInt         size,rank,taga,*len_s;
1031   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1032   PetscInt            **buf_ri,**buf_rj;
1033   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
1034   MPI_Request         *s_waits,*r_waits;
1035   MPI_Status          *status;
1036   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
1037   PetscInt            *api,*apj,*coi,*coj;
1038   PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
1039   PetscBool           scalable;
1040 #if defined(PTAP_PROFILE)
1041   PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
1042 #endif
1043 
1044   PetscFunctionBegin;
1045   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1046 #if defined(PTAP_PROFILE)
1047   ierr = PetscTime(&t0);CHKERRQ(ierr);
1048 #endif
1049   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1050   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1051 
1052   ptap = c->ptap;
1053   if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
1054   merge    = ptap->merge;
1055   apa      = ptap->apa;
1056   scalable = ptap->scalable;
1057 
1058   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1059   /*-----------------------------------------------------*/
1060   if (ptap->reuse == MAT_INITIAL_MATRIX) {
1061     /* P_oth and P_loc were obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
1062     ptap->reuse = MAT_REUSE_MATRIX;
1063   } else { /* update numerical values of P_oth and P_loc */
1064     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1065     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1066   }
1067 #if defined(PTAP_PROFILE)
1068   ierr = PetscTime(&t1);CHKERRQ(ierr);
1069   eP = t1-t0;
1070 #endif
1077 
1078   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
1079   /*--------------------------------------------------------------*/
1080   /* get data from symbolic products */
1081   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1082   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1083   pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
1084   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
1085 
1086   coi  = merge->coi; coj = merge->coj;
1087   ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
1088 
1089   bi     = merge->bi; bj = merge->bj;
1090   owners = merge->rowmap->range;
1091   ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */
1092 
1093   api = ptap->api; apj = ptap->apj;
1094 
1095   if (!scalable) { /* Do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but faster (could take 1/3 scalable time) */
1096     ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
1097 #if defined(PTAP_PROFILE)
1098     ierr = PetscTime(&t1);CHKERRQ(ierr);
1099 #endif
1100     for (i=0; i<am; i++) {
1101 #if defined(PTAP_PROFILE)
1102       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1103 #endif
1104       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
1105       /*------------------------------------------------------------*/
1106       apJ = apj + api[i];
1107 
1108       /* diagonal portion of A */
1109       anz = adi[i+1] - adi[i];
1110       adj = ad->j + adi[i];
1111       ada = ad->a + adi[i];
1112       for (j=0; j<anz; j++) {
1113         row = adj[j];
1114         pnz = pi_loc[row+1] - pi_loc[row];
1115         pj  = pj_loc + pi_loc[row];
1116         pa  = pa_loc + pi_loc[row];
1117 
1118         /* perform dense axpy */
1119         valtmp = ada[j];
1120         for (k=0; k<pnz; k++) {
1121           apa[pj[k]] += valtmp*pa[k];
1122         }
1123         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1124       }
1125 
1126       /* off-diagonal portion of A */
1127       anz = aoi[i+1] - aoi[i];
1128       aoj = ao->j + aoi[i];
1129       aoa = ao->a + aoi[i];
1130       for (j=0; j<anz; j++) {
1131         row = aoj[j];
1132         pnz = pi_oth[row+1] - pi_oth[row];
1133         pj  = pj_oth + pi_oth[row];
1134         pa  = pa_oth + pi_oth[row];
1135 
1136         /* perform dense axpy */
1137         valtmp = aoa[j];
1138         for (k=0; k<pnz; k++) {
1139           apa[pj[k]] += valtmp*pa[k];
1140         }
1141         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1142       }
1143 #if defined(PTAP_PROFILE)
1144       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1145       et2_AP += t2_1 - t2_0;
1146 #endif
1147 
1148       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1149       /*--------------------------------------------------------------*/
1150       apnz = api[i+1] - api[i];
1151       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
1152       pnz = po->i[i+1] - po->i[i];
1153       poJ = po->j + po->i[i];
1154       pA  = po->a + po->i[i];
1155       for (j=0; j<pnz; j++) {
1156         row = poJ[j];
1157         cnz = coi[row+1] - coi[row];
1158         cj  = coj + coi[row];
1159         ca  = coa + coi[row];
1160         /* perform dense axpy */
1161         valtmp = pA[j];
1162         for (k=0; k<cnz; k++) {
1163           ca[k] += valtmp*apa[cj[k]];
1164         }
1165         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1166       }
1167       /* put the value into Cd (diagonal part) */
1168       pnz = pd->i[i+1] - pd->i[i];
1169       pdJ = pd->j + pd->i[i];
1170       pA  = pd->a + pd->i[i];
1171       for (j=0; j<pnz; j++) {
1172         row = pdJ[j];
1173         cnz = bi[row+1] - bi[row];
1174         cj  = bj + bi[row];
1175         ca  = ba + bi[row];
1176         /* perform dense axpy */
1177         valtmp = pA[j];
1178         for (k=0; k<cnz; k++) {
1179           ca[k] += valtmp*apa[cj[k]];
1180         }
1181         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1182       }
1183 
1184       /* zero the current row of A*P */
1185       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
1186 #if defined(PTAP_PROFILE)
1187       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1188       ePtAP += t2_2 - t2_1;
1189 #endif
1190     }
1191   } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
1192     ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
1193     /*-----------------------------------------------------------------------------------------*/
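    /* the sparse axpy below merges two sorted index lists with a two-pointer scan:
       apJ[] (columns of AP[i,:], a superset) against pj[] (columns of P[row,:]);
       e.g., with illustrative rows apJ = {0,2,5,9} and pj = {2,9}, only apa[1] and
       apa[3] are updated; the same idiom is reused for the Pt*AP outer product */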
1194     pA=pa_loc;
1195     for (i=0; i<am; i++) {
1196 #if defined(PTAP_PROFILE)
1197       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1198 #endif
1199       /* form i-th sparse row of A*P */
1200       apnz = api[i+1] - api[i];
1201       apJ  = apj + api[i];
1202       /* diagonal portion of A */
1203       anz = adi[i+1] - adi[i];
1204       adj = ad->j + adi[i];
1205       ada = ad->a + adi[i];
1206       for (j=0; j<anz; j++) {
1207         row    = adj[j];
1208         pnz    = pi_loc[row+1] - pi_loc[row];
1209         pj     = pj_loc + pi_loc[row];
1210         pa     = pa_loc + pi_loc[row];
1211         valtmp = ada[j];
1212         nextp  = 0;
1213         for (k=0; nextp<pnz; k++) {
1214           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1215             apa[k] += valtmp*pa[nextp++];
1216           }
1217         }
1218         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1219       }
1220       /* off-diagonal portion of A */
1221       anz = aoi[i+1] - aoi[i];
1222       aoj = ao->j + aoi[i];
1223       aoa = ao->a + aoi[i];
1224       for (j=0; j<anz; j++) {
1225         row    = aoj[j];
1226         pnz    = pi_oth[row+1] - pi_oth[row];
1227         pj     = pj_oth + pi_oth[row];
1228         pa     = pa_oth + pi_oth[row];
1229         valtmp = aoa[j];
1230         nextp  = 0;
1231         for (k=0; nextp<pnz; k++) {
1232           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1233             apa[k] += valtmp*pa[nextp++];
1234           }
1235         }
1236         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1237       }
1238 #if defined(PTAP_PROFILE)
1239       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1240       et2_AP += t2_1 - t2_0;
1241 #endif
1242 
1243       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1244       /*--------------------------------------------------------------*/
1245       pnz = pi_loc[i+1] - pi_loc[i];
1246       pJ  = pj_loc + pi_loc[i];
1247       for (j=0; j<pnz; j++) {
1248         nextap = 0;
1249         row    = pJ[j]; /* global index */
1250         if (row < pcstart || row >=pcend) { /* put the value into Co */
1251           row = *poJ;
1252           cj  = coj + coi[row];
1253           ca  = coa + coi[row]; poJ++;
1254         } else {                            /* put the value into Cd */
1255           row = *pdJ;
1256           cj  = bj + bi[row];
1257           ca  = ba + bi[row]; pdJ++;
1258         }
1259         valtmp = pA[j];
1260         for (k=0; nextap<apnz; k++) {
1261           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
1262         }
1263         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1264       }
1265       pA += pnz;
1266       /* zero the current row info for A*P */
1267       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
1268 #if defined(PTAP_PROFILE)
1269       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1270       ePtAP += t2_2 - t2_1;
1271 #endif
1272     }
1273   }
1274 #if defined(PTAP_PROFILE)
1275   ierr = PetscTime(&t2);CHKERRQ(ierr);
1276 #endif
1277 
1278   /* 3) send and recv matrix values coa */
1279   /*------------------------------------*/
1280   buf_ri = merge->buf_ri;
1281   buf_rj = merge->buf_rj;
1282   len_s  = merge->len_s;
1283   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
1284   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
1285 
1286   ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
1287   for (proc=0,k=0; proc<size; proc++) {
1288     if (!len_s[proc]) continue;
1289     i    = merge->owners_co[proc];
1290     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
1291     k++;
1292   }
1293   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
1294   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
1295 
1296   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
1297   ierr = PetscFree(r_waits);CHKERRQ(ierr);
1298   ierr = PetscFree(coa);CHKERRQ(ierr);
1299 #if defined(PTAP_PROFILE)
1300   ierr = PetscTime(&t3);CHKERRQ(ierr);
1301 #endif
1302 
1303   /* 4) insert local Cseq and received values into Cmpi */
1304   /*------------------------------------------------------*/
1305   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
1306   for (k=0; k<merge->nrecv; k++) {
1307     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1308     nrows       = *(buf_ri_k[k]);
1309     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
1310     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of k-th recved message  */
1311   }
1312 
1313   for (i=0; i<cm; i++) {
1314     row  = owners[rank] + i; /* global row index of C_seq */
1315     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1316     ba_i = ba + bi[i];
1317     bnz  = bi[i+1] - bi[i];
1318     /* add received vals into ba */
1319     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1320       /* i-th row */
1321       if (i == *nextrow[k]) {
1322         cnz    = *(nextci[k]+1) - *nextci[k];
1323         cj     = buf_rj[k] + *(nextci[k]);
1324         ca     = abuf_r[k] + *(nextci[k]);
1325         nextcj = 0;
1326         for (j=0; nextcj<cnz; j++) {
1327           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1328             ba_i[j] += ca[nextcj++];
1329           }
1330         }
1331         nextrow[k]++; nextci[k]++;
1332         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1333       }
1334     }
1335     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
1336   }
1337   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1338   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1339 
1340   ierr = PetscFree(ba);CHKERRQ(ierr);
1341   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1342   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1343   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1344 #if defined(PTAP_PROFILE)
1345   ierr = PetscTime(&t4);CHKERRQ(ierr);
1346   if (rank==1) {
1347     ierr = PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
1348   }
1349 #endif
1350   PetscFunctionReturn(0);
1351 }
1352