
/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/
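
/*
  Both implementations in this file compute C = P^T*A*P in a symbolic phase
  (sparsity pattern and preallocation) followed by a numeric phase, and share
  the same overall structure:
    (0) Rd = Pd^T and Ro = Po^T, transposes of the diagonal and off-diagonal
        blocks of P
    (1) AP_loc = A_loc*P = Ad*P_loc + Ao*P_oth, where P_oth holds the rows of
        P, owned by other processes, that match nonzero columns of Ao
    (2) C_loc = Rd*AP_loc and C_oth = Ro*AP_loc
    (3) C is assembled from C_loc (locally owned rows) and C_oth (rows sent to
        their owning processes)
  The "scalable" variant keeps its work arrays proportional to the local
  problem size; the "nonscalable" variant uses dense accumulators of length
  P->cmap->N (the global number of columns of P).
*/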

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI         *ptap=a->ap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  if (!ptap) {
    /* hack: MatDuplicate() gives the new matrix the old ops->view, but the new matrix is a base mat class with a NULL ptap! */
    A->ops->view = MatView_MPIAIJ;
    ierr = (A->ops->view)(A,viewer);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI           *ptap=a->ap;
  Mat_Merge_SeqsToMPI *merge;

  PetscFunctionBegin;
  if (!ptap) PetscFunctionReturn(0);

  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
  ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
  if (ptap->AP_loc) { /* used by alg_rap */
    Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
    ierr = PetscFree(ap->i);CHKERRQ(ierr);
    ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
  } else { /* used by alg_ptap */
    ierr = PetscFree(ptap->api);CHKERRQ(ierr);
    ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
  if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);

  merge=ptap->merge;
  if (merge) { /* used by alg_ptap */
    ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->bi);CHKERRQ(ierr);
    ierr = PetscFree(merge->bj);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
    ierr = PetscFree(merge->coi);CHKERRQ(ierr);
    ierr = PetscFree(merge->coj);CHKERRQ(ierr);
    ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
    ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
  }

  A->ops->destroy = ptap->destroy;
  ierr = PetscFree(a->ap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char          *algTypes[2] = {"scalable","nonscalable"};
  PetscInt            nalg=2;
#else
  const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt            nalg=3;
#endif
  PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to the scalable algorithm as default */
      MatInfo     Ainfo,Pinfo;
      PetscInt    nz_local;
      PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }
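
    /* Illustrative numbers for the heuristic above (assumed, not measured):
       with fill=2.0 and nz_local = 60,000 allocated nonzeros of A and P on
       some rank, any pN > 120,000 makes that rank vote for the scalable
       algorithm; the MPI_LOR reduction then applies the switch on every rank
       if any single rank votes for it. */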

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P -- nonscalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* use boomerAMGBuildCoarseOperator */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P -- scalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }

    if (alg == 0 || alg == 1) {
      Mat_MPIAIJ *c  = (Mat_MPIAIJ*)(*C)->data;
      Mat_APMPI  *ap = c->ap;
      ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
      ap->freestruct = PETSC_FALSE;
      ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct, NULL);CHKERRQ(ierr);
      ierr = PetscOptionsEnd();CHKERRQ(ierr);
    }
  }

  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
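
/*
  A minimal usage sketch (illustrative only; it assumes A and P are assembled
  MATMPIAIJ matrices with compatible layouts, created elsewhere):

    Mat C;
    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   <-- symbolic + numeric
    ... change numerical values of A (same nonzero pattern) ...
    ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     <-- numeric phase only, reuses ptap
    ierr = MatDestroy(&C);CHKERRQ(ierr);

  The algorithm can be selected at runtime with -matptap_via <scalable,nonscalable>
  (plus hypre when PETSc is configured with it).
*/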

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc were obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api   = ap->i;
  apj   = ap->j;
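  /* The sparsity pattern (api,apj) of AP_loc is fixed by the symbolic phase,
     so each row is recomputed in place: the value slot ap->a + api[i] is
     zeroed and AProw_scalable() accumulates the sparse dot products into it. */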
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    apnz = api[i+1] - api[i];
    apa = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
  /* when there are no off-processor parts. */
  /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
  /* is no longer valid. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
  /* a table and other, more complex work has to be done. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated from A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_APMPI           *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi,P_loc,P_oth;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* create struct Mat_APMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc  = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;
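
  /* Row-by-row symbolic product: for each row of A, the column indices of the
     matching rows of P_loc and P_oth are merged through the condensed sorted
     linked list lnk; when the preallocated free space runs out, the total
     list space is doubled (nspacedouble counts these reallocations, reported
     below). */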

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until the receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to the receiving process)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
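    /* e.g. two rows r0,r1 with nzi0,nzi1 nonzeros each are packed as
       buf_si = [2, r0-owners[proc], r1-owners[proc], 0, nzi0, nzi0+nzi1],
       i.e. len_si[proc] = 2*(2+1) = 6 entries */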
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set the initial free space to Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of the k-th received message */
  }
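
  /* For each locally owned row i of C, merge the corresponding row of C_loc
     with any rows of C_oth received from other processes (walked via
     nextrow/nextci) to count the diagonal/off-diagonal nonzeros dnz/onz used
     for preallocating Cmpi. */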

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ap           = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

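/* Nonscalable variant: the communication pattern matches the scalable version
   above, but the symbolic merge uses a PetscBT-backed linked list spanning all
   pN global columns and the numeric phase scatters into a dense accumulator
   apa of length pN, trading memory proportional to the global column count
   for speed. */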
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_APMPI           *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_APMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until the receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to the receiving process)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set the initial free space to Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of the k-th received message */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ap           = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
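  /* apa is the dense accumulator of length pN (the global number of columns
     of P) used by MatPtAPNumeric_MPIAIJ_MPIAIJ(); this allocation is what
     makes the algorithm nonscalable in memory */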

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc were obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - the dominating part */
  /* -------------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa   = ptap->apa;
  api   = ap->i;
  apj   = ap->j;
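  /* Row i of AP is first accumulated into the dense array apa (indexed by
     global column number), then gathered into the CSR values ap->a following
     the symbolic pattern apj; each used entry of apa is reset to zero so the
     array is clean for the next row. */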
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
  /* when there are no off-processor parts. */
  /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
  /* is no longer valid. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
  /* a table and other, more complex work has to be done. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated from A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
1114