xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision e5a36eccef3d6b83a2c625c30d0dfd5adb4001f2)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
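/*
  A minimal usage sketch (illustrative only, not part of this file's implementation): given assembled
  MPIAIJ matrices A and P with compatible layouts, user code would typically form C = P^T*A*P as below.
  The routine name FormGalerkinProduct and the fill estimate 2.0 are assumptions made for the example.

    #include <petscmat.h>

    PetscErrorCode FormGalerkinProduct(Mat A,Mat P,Mat *C)
    {
      PetscErrorCode ierr;

      PetscFunctionBegin;
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,C);CHKERRQ(ierr); // symbolic + numeric product
      // ... later, after the values of A (same nonzero pattern) have changed ...
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,C);CHKERRQ(ierr);   // numeric product only, reusing the symbolic data
      PetscFunctionReturn(0);
    }
*/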
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 /* #define PTAP_PROFILE */
14 
15 PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16 {
17   PetscErrorCode    ierr;
18   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
19   Mat_APMPI         *ptap=a->ap;
20   PetscBool         iascii;
21   PetscViewerFormat format;
22 
23   PetscFunctionBegin;
24   if (!ptap) {
25     /* hack: MatDuplicate() copies oldmat->ops->view into newmat, which is a base mat class with a null ptap! */
26     A->ops->view = MatView_MPIAIJ;
27     ierr = (A->ops->view)(A,viewer);CHKERRQ(ierr);
28     PetscFunctionReturn(0);
29   }
30 
31   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
32   if (iascii) {
33     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
34     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
35       if (ptap->algType == 0) {
36         ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
37       } else if (ptap->algType == 1) {
38         ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
39       }
40     }
41   }
42   ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
43   PetscFunctionReturn(0);
44 }
45 
46 PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
47 {
48   PetscErrorCode      ierr;
49   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
50   Mat_APMPI           *ptap=a->ap;
51   Mat_Merge_SeqsToMPI *merge;
52 
53   PetscFunctionBegin;
54   if (!ptap) PetscFunctionReturn(0);
55 
56   ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
57   ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
58   ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
59   ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
60   ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
61   ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
62   ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
63   if (ptap->AP_loc) { /* used by alg_rap */
64     Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
65     ierr = PetscFree(ap->i);CHKERRQ(ierr);
66     ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
67     ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
68   } else { /* used by alg_ptap */
69     ierr = PetscFree(ptap->api);CHKERRQ(ierr);
70     ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
71   }
72   ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
73   ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
74   if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
75 
76   ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
77 
78   merge=ptap->merge;
79   if (merge) { /* used by alg_ptap */
80     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
81     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
82     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
83     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
84     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
85     ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
86     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
87     ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
88     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
89     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
90     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
91     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
92     ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
93     ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
94   }
95   ierr = ISLocalToGlobalMappingDestroy(&ptap->ltog);CHKERRQ(ierr);
96 
97   A->ops->destroy = ptap->destroy;
98   ierr = PetscFree(a->ap);CHKERRQ(ierr);
99   PetscFunctionReturn(0);
100 }
101 
102 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
103 {
104   PetscErrorCode ierr;
105 
106   PetscFunctionBegin;
107   ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
108   ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
109   PetscFunctionReturn(0);
110 }
111 
112 PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
113 {
114   PetscErrorCode ierr;
115   PetscBool      flg;
116   MPI_Comm       comm;
117 #if !defined(PETSC_HAVE_HYPRE)
118   const char          *algTypes[2] = {"scalable","nonscalable"};
119   PetscInt            nalg=2;
120 #else
121   const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
122   PetscInt            nalg=3;
123 #endif
124   PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */
125 
126   PetscFunctionBegin;
127   /* check if matrix local sizes are compatible */
128   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
129   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
130   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
131 
132   if (scall == MAT_INITIAL_MATRIX) {
133     /* pick an algorithm */
134     ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
135     ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
136     ierr = PetscOptionsEnd();CHKERRQ(ierr);
137 
138     if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
139       MatInfo     Ainfo,Pinfo;
140       PetscInt    nz_local;
141       PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
142 
143       ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
144       ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
145       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
146 
147       if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
148       ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);
149 
150       if (alg_scalable) {
151         alg = 0; /* the scalable algorithm may be ~50% slower than the nonscalable algorithm */
152       }
153     }
154 
155     switch (alg) {
156     case 1:
157       /* do R=P^T locally, then C=R*A*P -- nonscalable */
158       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
159       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
160       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
161       break;
162 #if defined(PETSC_HAVE_HYPRE)
163     case 2:
164       /* Use boomerAMGBuildCoarseOperator */
165       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
166       ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
167       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
168       break;
169 #endif
170     default:
171       /* do R=P^T locally, then C=R*A*P */
172       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
173       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
174       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
175       break;
176     }
177 
178     if (alg == 0 || alg == 1) {
179       Mat_MPIAIJ *c  = (Mat_MPIAIJ*)(*C)->data;
180       Mat_APMPI  *ap = c->ap;
181       ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
182       ap->freestruct = PETSC_FALSE;
183       ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);CHKERRQ(ierr);
184       ierr = PetscOptionsEnd();CHKERRQ(ierr);
185     }
186   }
187 
188   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
189   ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
190   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
191   PetscFunctionReturn(0);
192 }
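/*
  Option-selection sketch (a hypothetical command line, shown only to illustrate the options parsed above):

    mpiexec -n 4 ./app -matptap_via scalable -mat_freeintermediatedatastructures -info

  -matptap_via picks one of the algTypes listed above ("scalable", "nonscalable", and, when PETSc is
  configured with hypre, "hypre"); -mat_freeintermediatedatastructures releases the Mat_APMPI support
  structures after the numeric phase, saving memory at the cost of not being able to reuse C with
  MAT_REUSE_MATRIX.
*/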
193 
194 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
195 {
196   PetscErrorCode    ierr;
197   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
198   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
199   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
200   Mat_APMPI         *ptap = c->ap;
201   Mat               AP_loc,C_loc,C_oth;
202   PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz,nout;
203   PetscScalar       *apa;
204   const PetscInt    *cols;
205   const PetscScalar *vals;
206 
207   PetscFunctionBegin;
208   if (!ptap) {
209     MPI_Comm comm;
210     ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
211     SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
212   }
213 
214   ierr = MatZeroEntries(C);CHKERRQ(ierr);
215 
216   /* 1) get R = Pd^T,Ro = Po^T */
217   if (ptap->reuse == MAT_REUSE_MATRIX) {
218     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
219     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
220   }
221 
222   /* 2) get AP_loc */
223   AP_loc = ptap->AP_loc;
224   ap = (Mat_SeqAIJ*)AP_loc->data;
225 
226   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
227   /*-----------------------------------------------------*/
228   if (ptap->reuse == MAT_REUSE_MATRIX) {
229     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
230     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
231     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
232   }
233 
234   /* 2-2) compute numeric A_loc*P - dominating part */
235   /* ---------------------------------------------- */
236   /* get data from symbolic products */
237   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
238   if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
239   else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");
240 
241   api   = ap->i;
242   apj   = ap->j;
243   ierr = ISLocalToGlobalMappingApply(ptap->ltog,api[AP_loc->rmap->n],apj,apj);CHKERRQ(ierr);
244   for (i=0; i<am; i++) {
245     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
246     apnz = api[i+1] - api[i];
247     apa = ap->a + api[i];
248     ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
249     AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
250     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
251   }
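  /* Note: AProw_scalable (a kernel provided by the AIJ/MPIAIJ headers included above) accumulates row i of
     Ad*P_loc and Ao*P_oth directly into apa = ap->a + api[i], following the sparse column pattern apj computed
     symbolically, so no dense work array of length P->cmap->N is needed here. */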
252   ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,api[AP_loc->rmap->n],apj,&nout,apj);CHKERRQ(ierr);
253   if (api[AP_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",api[AP_loc->rmap->n],nout);
254 
255   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
256   /* Always use scalable version since we are in the MPI scalable version */
257   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
258   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
259 
260   C_loc = ptap->C_loc;
261   C_oth = ptap->C_oth;
262 
263   /* add C_loc and C_oth to C */
264   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
265 
266   /* C_loc -> C */
267   cm    = C_loc->rmap->N;
268   c_seq = (Mat_SeqAIJ*)C_loc->data;
269   cols = c_seq->j;
270   vals = c_seq->a;
271   ierr = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_loc->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);
272 
273   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat() can only be used when C->was_assembled is PETSC_FALSE and */
274   /* when there are no off-process entries. */
275   /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat() */
276   /* no longer holds, so the more general MatSetValues_MPIAIJ() must be used; it looks up each column index in a table */
277   /* and performs the extra bookkeeping that entails. */
278   if (C->assembled) {
279     C->was_assembled = PETSC_TRUE;
280     C->assembled     = PETSC_FALSE;
281   }
282   if (C->was_assembled) {
283     for (i=0; i<cm; i++) {
284       ncols = c_seq->i[i+1] - c_seq->i[i];
285       row = rstart + i;
286       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
287       cols += ncols; vals += ncols;
288     }
289   } else {
290     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
291   }
292   ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_loc->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
293   if (c_seq->i[C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_loc->rmap->n],nout);
294 
295   /* Co -> C, off-processor part */
296   cm = C_oth->rmap->N;
297   c_seq = (Mat_SeqAIJ*)C_oth->data;
298   cols = c_seq->j;
299   vals = c_seq->a;
300   ierr = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_oth->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);
301   for (i=0; i<cm; i++) {
302     ncols = c_seq->i[i+1] - c_seq->i[i];
303     row = p->garray[i];
304     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
305     cols += ncols; vals += ncols;
306   }
307   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
308   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
309 
310   ptap->reuse = MAT_REUSE_MATRIX;
311 
312   ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_oth->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
313   if (c_seq->i[C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_oth->rmap->n],nout);
314 
315   /* the supporting struct ptap consumes almost as much memory as C=PtAP itself; release it if C will not be updated with new values of A and P */
316   if (ptap->freestruct) {
317     ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
318   }
319   PetscFunctionReturn(0);
320 }
321 
322 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
323 {
324   PetscErrorCode      ierr;
325   Mat_APMPI           *ptap;
326   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
327   MPI_Comm            comm;
328   PetscMPIInt         size,rank;
329   Mat                 Cmpi,P_loc,P_oth;
330   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
331   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
332   PetscInt            *lnk,i,k,pnz,row,nsend;
333   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
334   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
335   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
336   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
337   MPI_Request         *swaits,*rwaits;
338   MPI_Status          *sstatus,rstatus;
339   PetscLayout         rowmap;
340   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
341   PetscMPIInt         *len_r,*id_r;    /* lengths and source ranks of the messages to be received */
342   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi,nout;
343   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
344   PetscScalar         *apv;
345   PetscTable          ta;
346   MatType             mtype;
347   const char          *prefix;
348 #if defined(PETSC_USE_INFO)
349   PetscReal           apfill;
350 #endif
351 
352   PetscFunctionBegin;
353   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
354   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
355   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
356 
357   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
358 
359   /* create symbolic parallel matrix Cmpi */
360   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
361   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
362   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
363 
364   /* create struct Mat_APMPI and attach it to C later */
365   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
366   ptap->reuse = MAT_INITIAL_MATRIX;
367   ptap->algType = 0;
368 
369   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
370   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
371   /* get P_loc by taking all local rows of P */
372   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);
373 
374   ptap->P_loc = P_loc;
375   ptap->P_oth = P_oth;
376 
377   /* (0) compute Rd = Pd^T, Ro = Po^T  */
378   /* --------------------------------- */
379   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
380   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
381 
382   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
383   /* ----------------------------------------------------------------- */
384   p_loc  = (Mat_SeqAIJ*)P_loc->data;
385   if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
386 
387   /* create and initialize a linked list */
388   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* used to compute AP_loc and Cmpi */
389   MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
390   MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
391   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
392 
393   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
394 
395   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
396   if (ao) {
397     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
398   } else {
399     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
400   }
401   current_space = free_space;
402   nspacedouble  = 0;
403 
404   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
405   api[0] = 0;
406   for (i=0; i<am; i++) {
407     /* diagonal portion: Ad[i,:]*P */
408     ai = ad->i; pi = p_loc->i;
409     nzi = ai[i+1] - ai[i];
410     aj  = ad->j + ai[i];
411     for (j=0; j<nzi; j++) {
412       row  = aj[j];
413       pnz  = pi[row+1] - pi[row];
414       Jptr = p_loc->j + pi[row];
415       /* add non-zero cols of P into the sorted linked list lnk */
416       ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
417     }
418     /* off-diagonal portion: Ao[i,:]*P */
419     if (ao) {
420       ai = ao->i; pi = p_oth->i;
421       nzi = ai[i+1] - ai[i];
422       aj  = ao->j + ai[i];
423       for (j=0; j<nzi; j++) {
424         row  = aj[j];
425         pnz  = pi[row+1] - pi[row];
426         Jptr = p_oth->j + pi[row];
427         ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
428       }
429     }
430     apnz     = lnk[0];
431     api[i+1] = api[i] + apnz;
432 
433     /* if free space is not available, double the total space in the list */
434     if (current_space->local_remaining<apnz) {
435       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
436       nspacedouble++;
437     }
438 
439     /* Copy data into free space, then initialize lnk */
440     ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
441 
442     current_space->array           += apnz;
443     current_space->local_used      += apnz;
444     current_space->local_remaining -= apnz;
445   }
446   /* Allocate space for apj and apv, initialize apj, and */
447   /* destroy list of free space and other temporary array(s) */
448   ierr = PetscCalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
449   ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
450   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
451 
452   /* Create AP_loc for reuse */
453   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
454   ierr = MatSeqAIJCompactOutExtraColumns_SeqAIJ(ptap->AP_loc, &ptap->ltog);CHKERRQ(ierr);
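  /* Note: compacting out the unused columns of AP_loc shrinks its column space from pN to only the columns that
     actually occur and records the compression in ptap->ltog; the code below uses ISLocalToGlobalMappingApply()
     and ISGlobalToLocalMappingApply() to translate the column indices of AP_loc, C_loc and C_oth whenever global
     indices are needed, which is what keeps this variant memory-scalable. */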
455 
456 #if defined(PETSC_USE_INFO)
457   if (ao) {
458     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
459   } else {
460     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
461   }
462   ptap->AP_loc->info.mallocs           = nspacedouble;
463   ptap->AP_loc->info.fill_ratio_given  = fill;
464   ptap->AP_loc->info.fill_ratio_needed = apfill;
465 
466   if (api[am]) {
467     ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
468     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
469   } else {
470     ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
471   }
472 #endif
473 
474   /* (2-1) compute symbolic Co = Ro*AP_loc  */
475   /* ------------------------------------ */
476   ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
477   ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
478   ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
479   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
480 
481   /* (3) send coj of C_oth to other processors  */
482   /* ------------------------------------------ */
483   /* determine row ownership */
484   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
485   rowmap->n  = pn;
486   rowmap->bs = 1;
487   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
488   owners = rowmap->range;
489 
490   /* determine the number of messages to send, their lengths */
491   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
492   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
493   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
494 
495   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
496   coi   = c_oth->i; coj = c_oth->j;
497   con   = ptap->C_oth->rmap->n;
498   proc  = 0;
499   ierr = ISLocalToGlobalMappingApply(ptap->ltog,coi[con],coj,coj);CHKERRQ(ierr);
500   for (i=0; i<con; i++) {
501     while (prmap[i] >= owners[proc+1]) proc++;
502     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
503     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
504   }
505 
506   len          = 0; /* max length of buf_si[], see (4) */
507   owners_co[0] = 0;
508   nsend        = 0;
509   for (proc=0; proc<size; proc++) {
510     owners_co[proc+1] = owners_co[proc] + len_si[proc];
511     if (len_s[proc]) {
512       nsend++;
513       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
514       len         += len_si[proc];
515     }
516   }
517 
518   /* determine the number and length of messages to receive for coi and coj  */
519   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
520   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
521 
522   /* post the Irecv and Isend of coj */
523   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
524   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
525   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
526   for (proc=0, k=0; proc<size; proc++) {
527     if (!len_s[proc]) continue;
528     i    = owners_co[proc];
529     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
530     k++;
531   }
532 
533   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
534   /* ---------------------------------------- */
535   ierr = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
536   ierr = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
537   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
538   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
539   ierr = ISLocalToGlobalMappingApply(ptap->ltog,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,c_loc->j);CHKERRQ(ierr);
540 
541   /* receives coj are complete */
542   for (i=0; i<nrecv; i++) {
543     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
544   }
545   ierr = PetscFree(rwaits);CHKERRQ(ierr);
546   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
547 
548   /* add received column indices into ta to update Crmax */
549   for (k=0; k<nrecv; k++) {/* k-th received message */
550     Jptr = buf_rj[k];
551     for (j=0; j<len_r[k]; j++) {
552       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
553     }
554   }
555   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
556   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
557 
558   /* (4) send and recv coi */
559   /*-----------------------*/
560   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
561   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
562   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
563   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
564   for (proc=0,k=0; proc<size; proc++) {
565     if (!len_s[proc]) continue;
566     /* form outgoing message for i-structure:
567          buf_si[0]:                 nrows to be sent
568                [1:nrows]:           row indices (local to the receiving process)
569                [nrows+1:2*nrows+1]: i-structure index
570     */
571     /*-------------------------------------------*/
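    /* Illustrative example (hypothetical data): sending two rows that land on local rows 4 and 7 of [proc],
       with 3 and 5 nonzeros respectively, gives len_si[proc] = 2*(2+1) = 6 and
         buf_si = [2, 4, 7, 0, 3, 8]
       i.e. nrows, the two local row indices, then the cumulative i-structure. */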
572     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
573     buf_si_i    = buf_si + nrows+1;
574     buf_si[0]   = nrows;
575     buf_si_i[0] = 0;
576     nrows       = 0;
577     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
578       nzi = coi[i+1] - coi[i];
579       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
580       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
581       nrows++;
582     }
583     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
584     k++;
585     buf_si += len_si[proc];
586   }
587   for (i=0; i<nrecv; i++) {
588     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
589   }
590   ierr = PetscFree(rwaits);CHKERRQ(ierr);
591   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
592 
593   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
594   ierr = PetscFree(len_ri);CHKERRQ(ierr);
595   ierr = PetscFree(swaits);CHKERRQ(ierr);
596   ierr = PetscFree(buf_s);CHKERRQ(ierr);
597 
598   /* (5) compute the local portion of Cmpi      */
599   /* ------------------------------------------ */
600   /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
601   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
602   current_space = free_space;
603 
604   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
605   for (k=0; k<nrecv; k++) {
606     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
607     nrows       = *buf_ri_k[k];
608     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number in the k-th received i-structure */
609     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received message */
610   }
611 
612   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
613   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
614   for (i=0; i<pn; i++) {
615     /* add C_loc into Cmpi */
616     nzi  = c_loc->i[i+1] - c_loc->i[i];
617     Jptr = c_loc->j + c_loc->i[i];
618     ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
619 
620     /* add received col data into lnk */
621     for (k=0; k<nrecv; k++) { /* k-th received message */
622       if (i == *nextrow[k]) { /* i-th row */
623         nzi  = *(nextci[k]+1) - *nextci[k];
624         Jptr = buf_rj[k] + *nextci[k];
625         ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
626         nextrow[k]++; nextci[k]++;
627       }
628     }
629     nzi = lnk[0];
630 
631     /* copy data into free space, then initialize lnk */
632     ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
633     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
634   }
635   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
636   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
637   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
638 
639   /* local sizes and preallocation */
640   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
641   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
642   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
643   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
644 
645   /* members in merge */
646   ierr = PetscFree(id_r);CHKERRQ(ierr);
647   ierr = PetscFree(len_r);CHKERRQ(ierr);
648   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
649   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
650   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
651   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
652   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
653 
654   /* attach the supporting struct to Cmpi for reuse */
655   c = (Mat_MPIAIJ*)Cmpi->data;
656   c->ap           = ptap;
657   ptap->duplicate = Cmpi->ops->duplicate;
658   ptap->destroy   = Cmpi->ops->destroy;
659   ptap->view      = Cmpi->ops->view;
660 
661   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
662   Cmpi->assembled        = PETSC_FALSE;
663   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
664   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
665   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
666   Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
667   *C                     = Cmpi;
668 
669   nout = 0;
670   ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_oth->i[ptap->C_oth->rmap->n],c_oth->j,&nout,c_oth->j);CHKERRQ(ierr);
671   if (c_oth->i[ptap->C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_oth->i[ptap->C_oth->rmap->n],nout);
672   ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,&nout,c_loc->j);CHKERRQ(ierr);
673   if (c_loc->i[ptap->C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_loc->i[ptap->C_loc->rmap->n],nout);
674 
675   PetscFunctionReturn(0);
676 }
677 
678 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
679 {
680   PetscErrorCode      ierr;
681   Mat_APMPI           *ptap;
682   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
683   MPI_Comm            comm;
684   PetscMPIInt         size,rank;
685   Mat                 Cmpi;
686   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
687   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
688   PetscInt            *lnk,i,k,pnz,row,nsend;
689   PetscBT             lnkbt;
690   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
691   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
692   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
693   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
694   MPI_Request         *swaits,*rwaits;
695   MPI_Status          *sstatus,rstatus;
696   PetscLayout         rowmap;
697   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
698   PetscMPIInt         *len_r,*id_r;    /* array of length of comm->size, store send/recv matrix values */
699   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
700   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
701   PetscScalar         *apv;
702   PetscTable          ta;
703   MatType             mtype;
704   const char          *prefix;
705 #if defined(PETSC_USE_INFO)
706   PetscReal           apfill;
707 #endif
708 
709   PetscFunctionBegin;
710   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
711   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
712   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
713 
714   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
715 
716   /* create symbolic parallel matrix Cmpi */
717   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
718   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
719   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
720 
721   /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
722   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
723 
724   /* create struct Mat_APMPI and attach it to C later */
725   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
726   ptap->reuse = MAT_INITIAL_MATRIX;
727   ptap->algType = 1;
728 
729   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
730   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
731   /* get P_loc by taking all local rows of P */
732   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
733 
734   /* (0) compute Rd = Pd^T, Ro = Po^T  */
735   /* --------------------------------- */
736   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
737   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
738 
739   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
740   /* ----------------------------------------------------------------- */
741   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
742   if (ptap->P_oth) p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
743 
744   /* create and initialize a linked list */
745   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* used to compute AP_loc and Cmpi */
746   MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
747   MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
748   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
749   /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
750 
751   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
752 
753   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
754   if (ao) {
755     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
756   } else {
757     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
758   }
759   current_space = free_space;
760   nspacedouble  = 0;
761 
762   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
763   api[0] = 0;
764   for (i=0; i<am; i++) {
765     /* diagonal portion: Ad[i,:]*P */
766     ai = ad->i; pi = p_loc->i;
767     nzi = ai[i+1] - ai[i];
768     aj  = ad->j + ai[i];
769     for (j=0; j<nzi; j++) {
770       row  = aj[j];
771       pnz  = pi[row+1] - pi[row];
772       Jptr = p_loc->j + pi[row];
773       /* add non-zero cols of P into the sorted linked list lnk */
774       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
775     }
776     /* off-diagonal portion: Ao[i,:]*P */
777     if (ao) {
778       ai = ao->i; pi = p_oth->i;
779       nzi = ai[i+1] - ai[i];
780       aj  = ao->j + ai[i];
781       for (j=0; j<nzi; j++) {
782         row  = aj[j];
783         pnz  = pi[row+1] - pi[row];
784         Jptr = p_oth->j + pi[row];
785         ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
786       }
787     }
788     apnz     = lnk[0];
789     api[i+1] = api[i] + apnz;
790     if (ap_rmax < apnz) ap_rmax = apnz;
791 
792     /* if free space is not available, double the total space in the list */
793     if (current_space->local_remaining<apnz) {
794       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
795       nspacedouble++;
796     }
797 
798     /* Copy data into free space, then initialize lnk */
799     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
800 
801     current_space->array           += apnz;
802     current_space->local_used      += apnz;
803     current_space->local_remaining -= apnz;
804   }
805   /* Allocate space for apj and apv, initialize apj, and */
806   /* destroy list of free space and other temporary array(s) */
807   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
808   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
809   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
810 
811   /* Create AP_loc for reuse */
812   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
813 
814 #if defined(PETSC_USE_INFO)
815   if (ao) {
816     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
817   } else {
818     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
819   }
820   ptap->AP_loc->info.mallocs           = nspacedouble;
821   ptap->AP_loc->info.fill_ratio_given  = fill;
822   ptap->AP_loc->info.fill_ratio_needed = apfill;
823 
824   if (api[am]) {
825     ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
826     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
827   } else {
828     ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
829   }
830 #endif
831 
832   /* (2-1) compute symbolic Co = Ro*AP_loc  */
833   /* ------------------------------------ */
834   ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
835   ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
836   ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
837   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
838 
839   /* (3) send coj of C_oth to other processors  */
840   /* ------------------------------------------ */
841   /* determine row ownership */
842   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
843   rowmap->n  = pn;
844   rowmap->bs = 1;
845   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
846   owners = rowmap->range;
847 
848   /* determine the number of messages to send, their lengths */
849   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
850   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
851   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
852 
853   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
854   coi   = c_oth->i; coj = c_oth->j;
855   con   = ptap->C_oth->rmap->n;
856   proc  = 0;
857   for (i=0; i<con; i++) {
858     while (prmap[i] >= owners[proc+1]) proc++;
859     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
860     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
861   }
862 
863   len          = 0; /* max length of buf_si[], see (4) */
864   owners_co[0] = 0;
865   nsend        = 0;
866   for (proc=0; proc<size; proc++) {
867     owners_co[proc+1] = owners_co[proc] + len_si[proc];
868     if (len_s[proc]) {
869       nsend++;
870       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
871       len         += len_si[proc];
872     }
873   }
874 
875   /* determine the number and length of messages to receive for coi and coj  */
876   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
877   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
878 
879   /* post the Irecv and Isend of coj */
880   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
881   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
882   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
883   for (proc=0, k=0; proc<size; proc++) {
884     if (!len_s[proc]) continue;
885     i    = owners_co[proc];
886     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
887     k++;
888   }
889 
890   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
891   /* ---------------------------------------- */
892   ierr = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
893   ierr = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
894   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
895   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
896 
897   /* receives coj are complete */
898   for (i=0; i<nrecv; i++) {
899     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
900   }
901   ierr = PetscFree(rwaits);CHKERRQ(ierr);
902   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
903 
904   /* add received column indices into ta to update Crmax */
905   for (k=0; k<nrecv; k++) {/* k-th received message */
906     Jptr = buf_rj[k];
907     for (j=0; j<len_r[k]; j++) {
908       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
909     }
910   }
911   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
912   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
913 
914   /* (4) send and recv coi */
915   /*-----------------------*/
916   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
917   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
918   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
919   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
920   for (proc=0,k=0; proc<size; proc++) {
921     if (!len_s[proc]) continue;
922     /* form outgoing message for i-structure:
923          buf_si[0]:                 nrows to be sent
924                [1:nrows]:           row indices (local to the receiving process)
925                [nrows+1:2*nrows+1]: i-structure index
926     */
927     /*-------------------------------------------*/
928     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
929     buf_si_i    = buf_si + nrows+1;
930     buf_si[0]   = nrows;
931     buf_si_i[0] = 0;
932     nrows       = 0;
933     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
934       nzi = coi[i+1] - coi[i];
935       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
936       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
937       nrows++;
938     }
939     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
940     k++;
941     buf_si += len_si[proc];
942   }
943   for (i=0; i<nrecv; i++) {
944     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
945   }
946   ierr = PetscFree(rwaits);CHKERRQ(ierr);
947   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
948 
949   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
950   ierr = PetscFree(len_ri);CHKERRQ(ierr);
951   ierr = PetscFree(swaits);CHKERRQ(ierr);
952   ierr = PetscFree(buf_s);CHKERRQ(ierr);
953 
954   /* (5) compute the local portion of Cmpi      */
955   /* ------------------------------------------ */
956   /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
957   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
958   current_space = free_space;
959 
960   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
961   for (k=0; k<nrecv; k++) {
962     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
963     nrows       = *buf_ri_k[k];
964     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number in the k-th received i-structure */
965     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received message */
966   }
967 
968   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
969   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
970   for (i=0; i<pn; i++) {
971     /* add C_loc into Cmpi */
972     nzi  = c_loc->i[i+1] - c_loc->i[i];
973     Jptr = c_loc->j + c_loc->i[i];
974     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
975 
976     /* add received col data into lnk */
977     for (k=0; k<nrecv; k++) { /* k-th received message */
978       if (i == *nextrow[k]) { /* i-th row */
979         nzi  = *(nextci[k]+1) - *nextci[k];
980         Jptr = buf_rj[k] + *nextci[k];
981         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
982         nextrow[k]++; nextci[k]++;
983       }
984     }
985     nzi = lnk[0];
986 
987     /* copy data into free space, then initialize lnk */
988     ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
989     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
990   }
991   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
992   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
993   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
994 
995   /* local sizes and preallocation */
996   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
997   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
998   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
999   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
1000 
1001   /* members in merge */
1002   ierr = PetscFree(id_r);CHKERRQ(ierr);
1003   ierr = PetscFree(len_r);CHKERRQ(ierr);
1004   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
1005   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
1006   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
1007   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
1008   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
1009 
1010   /* attach the supporting struct to Cmpi for reuse */
1011   c = (Mat_MPIAIJ*)Cmpi->data;
1012   c->ap           = ptap;
1013   ptap->duplicate = Cmpi->ops->duplicate;
1014   ptap->destroy   = Cmpi->ops->destroy;
1015   ptap->view      = Cmpi->ops->view;
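  /* Note: apa is a dense work array of global length pN = P->cmap->N; the numeric phase accumulates one row of
     A*P at a time into it and then gathers the entries back into AP_loc. This O(pN) per-process storage is the
     main reason this variant is the nonscalable one. */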
1016   ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
1017 
1018   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
1019   Cmpi->assembled        = PETSC_FALSE;
1020   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
1021   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
1022   Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
1023   *C                     = Cmpi;
1024   PetscFunctionReturn(0);
1025 }
1026 
1027 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
1028 {
1029   PetscErrorCode    ierr;
1030   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1031   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
1032   Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
1033   Mat_APMPI         *ptap = c->ap;
1034   Mat               AP_loc,C_loc,C_oth;
1035   PetscInt          i,rstart,rend,cm,ncols,row;
1036   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
1037   PetscScalar       *apa;
1038   const PetscInt    *cols;
1039   const PetscScalar *vals;
1040 
1041   PetscFunctionBegin;
1042   if (!ptap) {
1043     MPI_Comm comm;
1044     ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1045     SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1046   }
1047 
1048   ierr = MatZeroEntries(C);CHKERRQ(ierr);
1049   /* 1) get R = Pd^T,Ro = Po^T */
1050   if (ptap->reuse == MAT_REUSE_MATRIX) {
1051     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
1052     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
1053   }
1054 
1055   /* 2) get AP_loc */
1056   AP_loc = ptap->AP_loc;
1057   ap = (Mat_SeqAIJ*)AP_loc->data;
1058 
1059   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1060   /*-----------------------------------------------------*/
1061   if (ptap->reuse == MAT_REUSE_MATRIX) {
1062     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
1063     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1064     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1065   }
1066 
1067   /* 2-2) compute numeric A_loc*P - dominating part */
1068   /* ---------------------------------------------- */
1069   /* get data from symbolic products */
1070   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1071   if (ptap->P_oth) {
1072     p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1073   }
1074   apa   = ptap->apa;
1075   api   = ap->i;
1076   apj   = ap->j;
1077   for (i=0; i<am; i++) {
1078     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
1079     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
1080     apnz = api[i+1] - api[i];
1081     for (j=0; j<apnz; j++) {
1082       col = apj[j+api[i]];
1083       ap->a[j+ap->i[i]] = apa[col];
1084       apa[col] = 0.0;
1085     }
1086     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1087   }
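  /* Note: AProw_nonscalable (a kernel provided by the AIJ/MPIAIJ headers included above) scatters row i of
     Ad*P_loc and Ao*P_oth into the dense array apa; the loop above then gathers the entries listed in apj into
     ap->a and zeros apa again so it can be reused for the next row. */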
1088 
1089   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
1090   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
1091   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
1092   C_loc = ptap->C_loc;
1093   C_oth = ptap->C_oth;
1094 
1095   /* add C_loc and C_oth to C */
1096   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
1097 
1098   /* C_loc -> C */
1099   cm    = C_loc->rmap->N;
1100   c_seq = (Mat_SeqAIJ*)C_loc->data;
1101   cols = c_seq->j;
1102   vals = c_seq->a;
1103 
1104 
1105   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat() can only be used when C->was_assembled is PETSC_FALSE and */
1106   /* when there are no off-process entries. */
1107   /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat() */
1108   /* no longer holds, so the more general MatSetValues_MPIAIJ() must be used; it looks up each column index in a table */
1109   /* and performs the extra bookkeeping that entails. */
1110   if (C->assembled) {
1111     C->was_assembled = PETSC_TRUE;
1112     C->assembled     = PETSC_FALSE;
1113   }
1114   if (C->was_assembled) {
1115     for (i=0; i<cm; i++) {
1116       ncols = c_seq->i[i+1] - c_seq->i[i];
1117       row = rstart + i;
1118       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1119       cols += ncols; vals += ncols;
1120     }
1121   } else {
1122     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
1123   }
1124 
1125   /* Co -> C, off-processor part */
1126   cm = C_oth->rmap->N;
1127   c_seq = (Mat_SeqAIJ*)C_oth->data;
1128   cols = c_seq->j;
1129   vals = c_seq->a;
1130   for (i=0; i<cm; i++) {
1131     ncols = c_seq->i[i+1] - c_seq->i[i];
1132     row = p->garray[i];
1133     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1134     cols += ncols; vals += ncols;
1135   }
1136 
1137   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1138   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1139 
1140   ptap->reuse = MAT_REUSE_MATRIX;
1141 
1142   /* the supporting struct ptap consumes almost as much memory as C=PtAP itself; release it if C will not be updated with new values of A and P */
1143   if (ptap->freestruct) {
1144     ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
1145   }
1146   PetscFunctionReturn(0);
1147 }
1148