
/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/
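
/*
  A minimal usage sketch (illustrative only; it assumes A and P are assembled MATMPIAIJ
  matrices with compatible layouts, and the fill estimate 2.0 is an arbitrary guess):

      Mat C;
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   symbolic + numeric product
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     recompute values after A changes, reusing the symbolic data
      ierr = MatDestroy(&C);CHKERRQ(ierr);
*/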

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */
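/* When PTAP_PROFILE is defined, MatPtAPNumeric_MPIAIJ_MPIAIJ() below times the A*P and P^T*(A*P)
   kernels, the communication of off-process values, and the final assembly, and prints the
   breakdown from rank 1 */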

extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a   =(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    if (ptap->api) {ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
    if (ptap->apj) {ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
    if (merge) {
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
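      /* merge->destroy holds the original MATMPIAIJ destroy routine that was saved in
         MatPtAPSymbolic_MPIAIJ_MPIAIJ(); call it so the underlying MPIAIJ data of A is freed as usual */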
      ierr = merge->destroy(A);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI         *ptap  = a->ptap;
  Mat_Merge_SeqsToMPI *merge = ptap->merge;

  PetscFunctionBegin;
  ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy   = merge->destroy;
  (*M)->ops->duplicate = merge->duplicate;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
    ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

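/*
   MatPtAPSymbolic_MPIAIJ_MPIAIJ - computes the nonzero structure of C = P^T*A*P in six steps,
   mirroring the numbered comments in the body:
     (1) symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth                  (api,apj)
     (2) symbolic Co = (p->B)^T*AP, the C rows owned by other processes      (coi,coj)
     (3) send the j-arrays of Co to the owning processes
     (4) send the matching i-structures (row indices and row lengths)
     (5) merge P_diag^T*AP with the received rows to get the local row lengths of C
     (6) create the symbolic parallel matrix Cmpi and stash the supporting data in ptap/merge
*/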
#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*pti,*ptj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rmax;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);

  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }
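  /* For C = P^T*A*P to make sense, A's local column range must equal P's local row range
     (so A*P is defined) and A's local row range must also equal P's local row range
     (so P^T can be applied to A*P); both conditions are checked above */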

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ierr        = PetscNew(&merge);CHKERRQ(ierr);
  ptap->merge = merge;
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
  /*--------------------------------------------------------------------------*/
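  /* The column pattern of row i of AP is the union of the patterns of the rows of P indexed by
     the nonzero columns of A[i,:]: diagonal-block columns index rows of P_loc, off-diagonal-block
     columns index rows of P_oth; PetscLLCondensedAddSorted() accumulates the union in a sorted
     linked list */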
  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this caused out-of-memory for ex56, np=8k on Intrepid! */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);

  current_space = free_space;

  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    aj  = ad->j + adi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    aj  = ao->j + aoi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
  if (afill_tmp > afill) afill = afill_tmp;
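  /* afill tracks the largest observed ratio nnz(product)/nnz(inputs); it is reported at the end
     (PETSC_USE_INFO) as the fill value that callers can pass to MatPtAP() to avoid reallocations */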

  /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
  /*-----------------------------------------------------------------*/
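  /* p->B is the off-diagonal block of P, so (p->B)^T*AP contains the rows of P^T*AP that are owned
     by other processes; row i of the symbolic transpose (poti,potj) lists the local rows of P (and
     hence of AP) that have a nonzero in the i-th off-process column of P */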
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                         >= (num of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
  nnz           = fill*(poti[pon] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* (3) send j-array (coj) of Co to other processors */
  /*--------------------------------------------------*/
  ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
  len_s        = merge->len_s;
  merge->nsend = 0;

  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* total length of buf_s[] (concatenation of all buf_si), see (4) */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
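  /* Standard PETSc rendezvous: PetscGatherNumberOfMessages() tells each process how many messages it
     will receive, and PetscGatherMessageLengths2() returns, for each incoming message, the sending
     rank (id_r) along with the lengths of its j-array (len_r) and of its i-structure buffer (len_ri) */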

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* wait until receives and sends of coj are complete */
  for (i=0; i<merge->nrecv; i++) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of C (mpi mat) */
  /*----------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate pti array and free space for accumulating nonzero column info */
  ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
  pti[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = fill*(pi_loc[pm] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
  }
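  /* Each received i-structure is laid out as [nrows, row_0..row_{nrows-1}, offset_0..offset_nrows]
     (see the send side in step (4)); nextrow[k] walks the row indices and nextci[k] the offsets, so
     the received rows can be merged in order while i sweeps over the locally owned rows */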
  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j];  /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    pti[i+1] = pti[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* (6) create symbolic parallel matrix Cmpi */
  /*------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  merge->bi        = pti;      /* Cseq->i */
  merge->bj        = ptj;      /* Cseq->j */
  merge->coi       = coi;      /* Co->i   */
  merge->coj       = coj;      /* Co->j   */
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c           = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap     = ptap;
  ptap->api   = api;
  ptap->apj   = apj;
  ptap->rmax  = ap_rmax;
  *C          = Cmpi;

  /* flag 'scalable' determines which implementation is used:
       PETSC_FALSE: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
       PETSC_TRUE:  do sparse axpy in MatPtAPNumeric() - may be slower, uses a sparse array apa */
  /* set default scalable */
  ptap->scalable = PETSC_TRUE;

  ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
  if (!ptap->scalable) {  /* Do dense axpy */
    ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
  } else {
    ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
  }

#if defined(PETSC_USE_INFO)
  if (pti[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  Mat_PtAPMPI         *ptap;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
  PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *api,*apj,*coi,*coj;
  PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
  PetscBool           scalable;
#if defined(PTAP_PROFILE)
  PetscLogDouble t0,t1,t2,t3,t4,et2_AP=0.0,et2_PtAP=0.0,t2_0,t2_1,t2_2;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t0);CHKERRQ(ierr);
#endif
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap = c->ptap;
  if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
  merge    = ptap->merge;
  apa      = ptap->apa;
  scalable = ptap->scalable;

  /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic_MPIAIJ_MPIAIJ(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else { /* update numerical values of P_oth and P_loc */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t1);CHKERRQ(ierr);
#endif

  /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
  /*--------------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  coi  = merge->coi; coj = merge->coj;
  ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */

  api = ptap->api; apj = ptap->apj;
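  /* ptap->scalable was set in MatPtAPSymbolic_MPIAIJ_MPIAIJ() (option -matptap_scalable):
     PETSC_FALSE uses a dense work array apa of global length pN (fast but nonscalable),
     PETSC_TRUE uses a sparse work array of length ap_rmax at the cost of index matching below */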

  if (!scalable) { /* Do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but fast */
    ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
    /*-----------------------------------------------------------------------------------------------------*/
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        valtmp = ada[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        valtmp = aoa[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      apnz = api[i+1] - api[i];
      /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
      pnz = po->i[i+1] - po->i[i];
      poJ = po->j + po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++) {
        row = poJ[j];
        cnz = coi[row+1] - coi[row];
        cj  = coj + coi[row];
        ca  = coa + coi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* put the value into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pdJ = pd->j + pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++) {
        row = pdJ[j];
        cnz = bi[row+1] - bi[row];
        cj  = bj + bi[row];
        ca  = ba + bi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
#if defined(PTAP_PROFILE)
      ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
    ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
    /*-----------------------------------------------------------------------------------------*/
    pA=pa_loc;
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* form i-th sparse row of A*P */
      apnz = api[i+1] - api[i];
      apJ  = apj + api[i];
      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row    = adj[j];
        pnz    = pi_loc[row+1] - pi_loc[row];
        pj     = pj_loc + pi_loc[row];
        pa     = pa_loc + pi_loc[row];
        valtmp = ada[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row    = aoj[j];
        pnz    = pi_oth[row+1] - pi_oth[row];
        pj     = pj_oth + pi_oth[row];
        pa     = pa_oth + pi_oth[row];
        valtmp = aoa[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      pnz = pi_loc[i+1] - pi_loc[i];
      pJ  = pj_loc + pi_loc[i];
      for (j=0; j<pnz; j++) {
        nextap = 0;
        row    = pJ[j]; /* global index */
        if (row < pcstart || row >=pcend) { /* put the value into Co */
          row = *poJ;
          cj  = coj + coi[row];
          ca  = coa + coi[row]; poJ++;
        } else {                            /* put the value into Cd */
          row = *pdJ;
          cj  = bj + bi[row];
          ca  = ba + bi[row]; pdJ++;
        }
        valtmp = pA[j];
        for (k=0; nextap<apnz; k++) {
          if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }
      pA += pnz;
      /* zero the current row info for A*P */
      ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
      ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t2);CHKERRQ(ierr);
#endif

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t3);CHKERRQ(ierr);
#endif

  /* 4) insert local Cseq and received values into Cmpi */
  /*------------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
  }
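  /* For each owned row i, the local values ba_i and any received values for the same global row are
     combined by matching their (sorted) column indices, and the row is then inserted into the
     parallel matrix C with MatSetValues() */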

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];  /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t4);CHKERRQ(ierr);
  if (rank==1) PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g + %g ) + %g/comm + %g/Cloc = %g\n\n",rank,t1-t0,t2-t1,et2_AP,et2_PtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}