
/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/
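
/*
  A minimal caller-side sketch (hedged: assumes A and P are assembled MPIAIJ
  matrices with compatible layouts; error handling abbreviated):

    Mat C;
    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   symbolic + numeric phases
    ... modify the values (not the pattern) of A ...
    ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     numeric phase only, reusing symbolic data
    ierr = MatDestroy(&C);CHKERRQ(ierr);

  The fill argument (2.0 here) is the expected ratio nnz(C)/(nnz(A)+nnz(P)),
  used below to size the symbolic work spaces.
*/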

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI       *ptap=a->ptap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
    if (ptap->AP_loc) { /* used by alg_rap */
      Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
      ierr = PetscFree(ap->i);CHKERRQ(ierr);
      ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
      ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
    } else { /* used by alg_ptap */
      ierr = PetscFree(ptap->api);CHKERRQ(ierr);
      ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
    }
    ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

    if (merge) { /* used by alg_ptap */
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }

    ierr = ptap->destroy(A);CHKERRQ(ierr);
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap  = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;
  (*M)->ops->duplicate = ptap->duplicate;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char          *algTypes[2] = {"scalable","nonscalable"};
  PetscInt            nalg=2;
#else
  const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt            nalg=3;
#endif
  PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D,%D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D,%D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
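    /* Hedged usage note: the implementation can be chosen at run time, e.g.
         -matptap_via scalable
       (also "nonscalable", and "hypre" when PETSc is configured with hypre);
       an explicit choice sets flg and bypasses the size-based heuristic below. */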

    if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
      MatInfo     Ainfo,Pinfo;
      PetscInt    nz_local;
      PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }
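
    /* Worked example of the heuristic above (hypothetical numbers): with
       fill = 2.0 and nz_local = 40000 on some rank, any pN > 80000 marks that
       rank scalable; the MPI_LOR reduction then switches every rank to the
       scalable algorithm, trading some speed for O(local nnz) memory per
       process instead of O(pN). */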

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* Use boomerAMGBuildCoarseOperator */
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }
  }
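  /* Numeric phase (the hypre branch above returns before reaching this point).
     Both AIJ symbolic routines set (*C)->ops->ptapnumeric, so this call
     dispatches to the matching numeric kernel. */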
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc were obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api   = ap->i;
  apj   = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    apnz = api[i+1] - api[i];
    apa = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }
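
  /* A hedged sketch of what the AProw_scalable() macro (from mpiaij.h) computes
     for row i, restricted to the pattern api/apj fixed in the symbolic phase:
       for each k with Ad(i,k) != 0: apa[:] += Ad(i,k)*P_loc[k,:]
       for each k with Ao(i,k) != 0: apa[:] += Ao(i,k)*P_oth[k,:]
     i.e. a sparse axpy per nonzero of row i of A, accumulated in place. */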

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = rstart + i;
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi,P_loc,P_oth;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays storing the lengths and sources of received messages */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc  = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
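
  /* Interpretation (hedged): Crmax counts the distinct column indices appearing
     anywhere in P_loc/P_oth, so it bounds the number of nonzeros a single row
     of AP (and of C) can have; it is used to size the condensed linked list once. */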

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
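
  /* Note: MatCreateSeqAIJWithArrays() does not copy api/apj/apv; AP_loc keeps
     pointing at them, which is why MatDestroy_MPIAIJ_PtAP() above frees ap->i
     and ap->j/ap->a explicitly before destroying AP_loc. */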

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
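    /* Worked example (hypothetical): sending two rows with 3 and 5 nonzeros and
       local row indices r0 and r1 gives len_si[proc] = 2*(2+1) = 6 and
       buf_si = {2, r0, r1, 0, 3, 8}. */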
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays storing the lengths and sources of received messages */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
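
  /* This is the nonscalable part: unlike PetscLLCondensedCreate_Scalable() used
     in the scalable routine, this variant also allocates the bit array lnkbt
     with one bit per global column (pN bits) per process, and ptap->apa set up
     below is a dense scalar array of length pN; memory therefore grows with the
     global size of P. */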

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* wait until receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc were obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa   = ptap->apa;
  api   = ap->i;
  apj   = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }
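
  /* The loop above is the dense-accumulator ("nonscalable") pattern:
     AProw_nonscalable() scatters row i of A times P into apa, a dense work
     array indexed by global column (length pN), and the inner loop gathers the
     entries listed in apj back into the CSR values ap->a, re-zeroing apa so it
     is clean for the next row. */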

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = rstart + i;
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}