
/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */
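
/*
  A minimal usage sketch (illustrative only): assuming A and P are assembled
  MATMPIAIJ matrices with compatible layouts, the run-time option
  -matptap_via <scalable,nonscalable> selects between the implementations below.

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   symbolic and numeric phases
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     numeric phase only, after values of A change
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/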

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI       *ptap=a->ptap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
    if (ptap->AP_loc) { /* used by alg_rap */
      Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
      ierr = PetscFree(ap->i);CHKERRQ(ierr);
      ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
      ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
    } else { /* used by alg_ptap */
      ierr = PetscFree(ptap->api);CHKERRQ(ierr);
      ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
    }
    ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

    if (merge) { /* used by alg_ptap */
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }

    ierr = ptap->destroy(A);CHKERRQ(ierr);
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap  = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;
  (*M)->ops->duplicate = ptap->duplicate;
  (*M)->ops->view      = ptap->view;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char          *algTypes[2] = {"scalable","nonscalable"};
  PetscInt            nalg=2;
#else
  const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt            nalg=3;
#endif
  PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to the scalable algorithm as default */
      MatInfo     Ainfo,Pinfo;
      PetscInt    nz_local;
      PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }
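    /* Worked example of the heuristic above (illustrative numbers only): with
       fill = 2.0 and nz_local = 60000 allocated nonzeros of A and P on some
       process, any pN > 120000 sets alg_scalable_loc there, and the MPI_LOR
       reduction then switches every process to the scalable algorithm (alg = 0). */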

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* Use boomerAMGBuildCoarseOperator */
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api   = ap->i;
  apj   = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    apnz = api[i+1] - api[i];
    apa = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = rstart + i;
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

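  /* p->garray[] maps each compressed local row of C_oth back to the global row
     of C owned by another process, so the ADD_VALUES insertions below are
     accumulated on the owning processes during assembly. */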
  /* Co -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi,P_loc,P_oth;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc  = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
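  /* Sketch of the row merge performed below (illustrative indices): if row i of
     Ad has columns {1,4}, row 1 of P_loc has columns {0,2} and row 4 has columns
     {2,5}, the sorted linked list lnk accumulates the union {0,2,5}, so row i of
     AP gets apnz = 3 entries. */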

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty.\n");CHKERRQ(ierr);
  }
#endif
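
  /* For example (illustrative numbers): if nnz(AP_loc) = api[am] = 900 while
     nnz(A_loc) + nnz(P_loc) = 300, then apfill = 3.0; passing fill >= 3.0 to the
     next MatPtAP() call avoids the nspacedouble reallocations reported above. */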

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* ------------------------------------ */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to [proc])
               [nrows+1:2*nrows+1]: i-structure index
    */
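    /* For instance (illustrative values): sending nrows = 2 rows with local
       indices {5,9} and nonzero counts {3,4} gives buf_si = [2, 5, 9, 0, 3, 7],
       of length 2*(nrows+1) = 6, matching len_si[proc] computed above. */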
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty.\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* ------------------------------------ */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to [proc])
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa   = ptap->apa;
  api   = ap->i;
  apj   = ap->j;
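  /* The nonscalable kernel below scatters each row A[i,:]*P into the dense work
     array apa (length pN, allocated in the symbolic phase), then gathers the
     apnz entries listed in apj back into the CSR value array and rezeroes only
     those slots, so the O(pN) accumulator is cleaned in O(apnz) time per row. */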
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = rstart + i;
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  /* Co -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}