xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision fa59e80594ae4f5b96fb1a0d88f82abc4bbaec69)
1 
2 /*
3   Defines projective product routines (C = P^T * A * P) where A is an MPIAIJ matrix;
4   a brief usage sketch follows the #include lines below.
5 */
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
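
/*
   Usage sketch (illustrative only, guarded out of the build): the routines in this file are
   normally reached through MatPtAP().  MatPtAPUsageSketch is a hypothetical name used just for
   this sketch; it assumes A and P are assembled parallel MPIAIJ matrices with compatible
   layouts.  A first call with MAT_INITIAL_MATRIX runs the symbolic and numeric phases and
   creates C; later calls with MAT_REUSE_MATRIX reuse the symbolic data attached to C and rerun
   only the numeric phase.
*/
#if 0
#undef __FUNCT__
#define __FUNCT__ "MatPtAPUsageSketch"
static PetscErrorCode MatPtAPUsageSketch(Mat A,Mat P,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,C);CHKERRQ(ierr); /* symbolic + numeric phases; creates C */
  /* ... update the numerical values of A (same nonzero pattern) ... */
  ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,C);CHKERRQ(ierr);   /* numeric phase only; reuses the ptap data attached to C */
  PetscFunctionReturn(0);
}
#endif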
11 
12 /*
13 #define DEBUG_MATPTAP
14  */
15 
16 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
17 #undef __FUNCT__
18 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
19 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
20 {
21   PetscErrorCode       ierr;
22   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data;
23   Mat_PtAPMPI          *ptap=a->ptap;
24 
25   PetscFunctionBegin;
26   if (ptap){
27     Mat_Merge_SeqsToMPI  *merge=ptap->merge;
28     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
29     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
32     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
33     if (ptap->api){ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
34     if (ptap->apj){ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
35     if (merge) {
36       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
37       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
38       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
39       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
40       ierr = PetscFree(merge->bj);CHKERRQ(ierr);
41       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
42       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
43       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
44       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
45       ierr = PetscFree(merge->coi);CHKERRQ(ierr);
46       ierr = PetscFree(merge->coj);CHKERRQ(ierr);
47       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
48       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
49       ierr = merge->destroy(A);CHKERRQ(ierr);
50       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
51     }
52     ierr = PetscFree(ptap);CHKERRQ(ierr);
53   }
54 
55   PetscFunctionReturn(0);
56 }
57 
58 #undef __FUNCT__
59 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
60 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
61 {
62   PetscErrorCode       ierr;
63   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data;
64   Mat_PtAPMPI          *ptap = a->ptap;
65   Mat_Merge_SeqsToMPI  *merge = ptap->merge;
66 
67   PetscFunctionBegin;
68   ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);
69   (*M)->ops->destroy   = merge->destroy;
70   (*M)->ops->duplicate = merge->duplicate;
71   PetscFunctionReturn(0);
72 }
73 
74 #undef __FUNCT__
75 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
76 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
77 {
78   PetscErrorCode ierr;
79 
80   PetscFunctionBegin;
81   if (scall == MAT_INITIAL_MATRIX){
82     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
83     ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
84     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
85   }
86   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
87   ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
88   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
89   PetscFunctionReturn(0);
90 }
91 
92 #undef __FUNCT__
93 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
94 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
95 {
96   PetscErrorCode       ierr;
97   Mat                  Cmpi;
98   Mat_PtAPMPI          *ptap;
99   PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
100   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
101   Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
102   Mat_SeqAIJ           *p_loc,*p_oth;
103   PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
104   PetscInt             *adi=ad->i,*aj,*aoi=ao->i,nnz;
105   PetscInt             nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
106   PetscInt             am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
107   PetscBT              lnkbt;
108   MPI_Comm             comm=((PetscObject)A)->comm;
109   PetscMPIInt          size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
110   PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
111   PetscInt             len,proc,*dnz,*onz,*owners;
112   PetscInt             nzi,*bi,*bj;
113   PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
114   MPI_Request          *swaits,*rwaits;
115   MPI_Status           *sstatus,rstatus;
116   Mat_Merge_SeqsToMPI  *merge;
117   PetscInt             *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
118   PetscReal            afill=1.0,afill_tmp;
119   PetscInt             rstart = P->cmap->rstart,rmax;
120   PetscScalar          *vals;
121 
122   PetscFunctionBegin;
123   /* check if matrix local sizes are compatible */
124   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend){
125     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
126   }
127   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend){
128     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
129   }
130 
131   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
132   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
133 
134   /* create the struct Mat_PtAPMPI; it is attached to C later */
135   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
136   ptap->reuse    = MAT_INITIAL_MATRIX;
137 
138 
139   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
140   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
141 #if defined(DEBUG_MATPTAP)
142    if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_oth is done...\n",rank);
143 #endif
144 
145   /* get P_loc by taking all local rows of P */
146   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
147 #if defined(DEBUG_MATPTAP)
148   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
149   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_loc is done...\n",rank);
150 #endif
151 
152   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
153   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
154   pi_loc = p_loc->i; pj_loc = p_loc->j;
155   pi_oth = p_oth->i; pj_oth = p_oth->j;
156 
157   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
158   /*-------------------------------------------------------------------*/
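  /*
     Sketch of the decomposition used below (for orientation only): on this process the rows of A
     split into the diagonal block Ad = a->A (columns owned locally) and the off-diagonal block
     Ao = a->B (columns owned elsewhere), so for each local row i

         (A*P)[i,:] = Ad[i,:]*P_loc + Ao[i,:]*P_oth

     where P_loc holds the locally owned rows of P and P_oth holds the remote rows of P that
     match the nonzero columns of Ao.
  */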
159   ierr = PetscMalloc((am+1)*sizeof(PetscInt),&api);CHKERRQ(ierr);
160   api[0] = 0;
161 
162   /* create and initialize a linked list */
163   nlnk = pN+1;
164 #if defined(DEBUG_MATPTAP)
165   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
166   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate(), pN %D, P_rmax %D %D; Annz %D\n",rank,pN,p_loc->rmax,p_oth->rmax,adi[am]+aoi[am]);
167 #endif
168   ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
169 #if defined(DEBUG_MATPTAP)
170   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
171   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate() is done\n",rank);
172 #endif
173 
174   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
175   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
176   current_space = free_space;
177 
178   for (i=0; i<am; i++) {
179     apnz = 0;
180     /* diagonal portion of A */
181     nzi = adi[i+1] - adi[i];
182     aj  = ad->j + adi[i];
183     for (j=0; j<nzi; j++){
184       row  = aj[j];
185       pnz  = pi_loc[row+1] - pi_loc[row];
186       Jptr = pj_loc + pi_loc[row];
187       /* add non-zero cols of P into the sorted linked list lnk */
188       ierr = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
189       apnz += nlnk;
190     }
191     /* off-diagonal portion of A */
192     nzi = aoi[i+1] - aoi[i];
193     aj  = ao->j + aoi[i];
194     for (j=0; j<nzi; j++){
195       row = aj[j];
196       pnz = pi_oth[row+1] - pi_oth[row];
197       Jptr  = pj_oth + pi_oth[row];
198       ierr = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
199       apnz += nlnk;
200     }
201 
202     api[i+1] = api[i] + apnz;
203     if (ap_rmax < apnz) ap_rmax = apnz;
204 
205     /* if free space is not available, double the total space in the list */
206     if (current_space->local_remaining<apnz) {
207       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
208       nspacedouble++;
209     }
210 
211     /* Copy data into free space, then initialize lnk */
212     ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
213     current_space->array           += apnz;
214     current_space->local_used      += apnz;
215     current_space->local_remaining -= apnz;
216   }
217   /* Allocate space for apj and initialize it, then */
218   /* destroy the list of free space and other temporary arrays */
219   ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&apj);CHKERRQ(ierr);
220   ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
221   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
222   if (afill_tmp > afill) afill = afill_tmp;
223 
224   /* determine symbolic Co=(p->B)^T*AP - send to others */
225   /*----------------------------------------------------*/
226   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
227 
228   /* then, compute symbolic Co = (p->B)^T*AP */
229   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
230                          >= (num of nonzero rows of C_seq) - pn */
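  /*
     For orientation: each column of the off-diagonal block p->B corresponds, through p->garray
     (prmap below), to a global row of C owned by another process; Co therefore collects this
     process's contributions to those pon remote rows, which are sent to their owners further down.
  */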
231   ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
232   coi[0] = 0;
233 
234   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
235   nnz           = (PetscInt)(fill*(poti[pon] + api[am]));
236   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
237   current_space = free_space;
238 
239   for (i=0; i<pon; i++) {
240     nnz = 0;
241     pnz = poti[i+1] - poti[i];
242     ptJ = potj + poti[i];
243     for (j=0; j<pnz; j++){
244       row  = ptJ[j]; /* row of AP == col of Pot */
245       apnz = api[row+1] - api[row];
246       Jptr = apj + api[row];
247       /* add non-zero cols of AP into the sorted linked list lnk */
248       ierr = PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
249       nnz += nlnk;
250     }
251 
252     /* If free space is not available, double the total space in the list */
253     if (current_space->local_remaining<nnz) {
254       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
255       nspacedouble++;
256     }
257 
258     /* Copy data into free space, then initialize lnk */
259     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
260     current_space->array           += nnz;
261     current_space->local_used      += nnz;
262     current_space->local_remaining -= nnz;
263     coi[i+1] = coi[i] + nnz;
264   }
265   ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
266   ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
267   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
268   if (afill_tmp > afill) afill = afill_tmp;
269   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
270 
271 #if defined(DEBUG_MATPTAP)
272   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
273   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] local Co is done\n",rank);
274 #endif
275 
276   /* send j-array (coj) of Co to other processors */
277   /*----------------------------------------------*/
278   /* determine row ownership */
279   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
280   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
281   merge->rowmap->n = pn;
282   merge->rowmap->bs = 1;
283   ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
284   owners = merge->rowmap->range;
285 
286   /* determine the number of messages to send and their lengths */
287   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
288   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
289   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
290   len_s = merge->len_s;
291   merge->nsend = 0;
292 
293   ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
294   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
295 
296   proc = 0;
297   for (i=0; i<pon; i++){
298     while (prmap[i] >= owners[proc+1]) proc++;
299     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
300     len_s[proc] += coi[i+1] - coi[i];
301   }
302 
303   len   = 0;  /* total length of buf_s[], the buffer for all outgoing i-structure messages */
304   owners_co[0] = 0;
305   for (proc=0; proc<size; proc++){
306     owners_co[proc+1] = owners_co[proc] + len_si[proc];
307     if (len_si[proc]){
308       merge->nsend++;
309       len_si[proc] = 2*(len_si[proc] + 1);
310       len += len_si[proc];
311     }
312   }
313 
314   /* determine the number and lengths of messages to receive for coi and coj */
315   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
316   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
317 
318   /* post the Irecv and Isend of coj */
319   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
320   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
321   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
322   for (proc=0, k=0; proc<size; proc++){
323     if (!len_s[proc]) continue;
324     i = owners_co[proc];
325     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
326     k++;
327   }
328 
329   /* wait until the receives and sends of coj are complete */
330   ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
331   for (i=0; i<merge->nrecv; i++){
332     PetscMPIInt icompleted;
333     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
334   }
335   ierr = PetscFree(rwaits);CHKERRQ(ierr);
336   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
337 
338   /* send and recv coi */
339   /*-------------------*/
340   ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
341   ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
342   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
343   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
344   for (proc=0,k=0; proc<size; proc++){
345     if (!len_s[proc]) continue;
346     /* form outgoing message for i-structure:
347          buf_si[0]:                 nrows to be sent
348                [1:nrows]:           row indices (local to the destination process)
349                [nrows+1:2*nrows+1]: i-structure (cumulative nonzero counts)
350     */
351     /*-------------------------------------------*/
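    /*
       Hypothetical example (illustration only): if rows with destination-local indices 3 and 7
       are sent to [proc], carrying 5 and 2 nonzeros respectively, then the message is
           buf_si = [2,  3, 7,  0, 5, 7]
       i.e. nrows, the nrows local row indices, then the nrows+1 cumulative i-structure entries.
    */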
352     nrows = len_si[proc]/2 - 1;
353     buf_si_i    = buf_si + nrows+1;
354     buf_si[0]   = nrows;
355     buf_si_i[0] = 0;
356     nrows = 0;
357     for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
358       nzi = coi[i+1] - coi[i];
359       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
360       buf_si[nrows+1] =prmap[i] -owners[proc]; /* local row index */
361       nrows++;
362     }
363     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
364     k++;
365     buf_si += len_si[proc];
366   }
367   i = merge->nrecv;
368   while (i--) {
369     PetscMPIInt icompleted;
370     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
371   }
372   ierr = PetscFree(rwaits);CHKERRQ(ierr);
373   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
374   /*
375   ierr = PetscInfo2(A,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
376   for (i=0; i<merge->nrecv; i++){
377     ierr = PetscInfo3(A,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
378   }
379   */
380   ierr = PetscFree(len_si);CHKERRQ(ierr);
381   ierr = PetscFree(len_ri);CHKERRQ(ierr);
382   ierr = PetscFree(swaits);CHKERRQ(ierr);
383   ierr = PetscFree(sstatus);CHKERRQ(ierr);
384   ierr = PetscFree(buf_s);CHKERRQ(ierr);
385 
386 #if defined(DEBUG_MATPTAP)
387   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
388   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Co is done\n",rank);
389 #endif
390 
391   /* compute the local portion of C (mpi mat) */
392   /*------------------------------------------*/
393   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
394 
395   /* allocate bi array and free space for accumulating nonzero column info */
396   ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
397   bi[0] = 0;
398 
399   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
400   nnz           = (PetscInt)(fill*(pi_loc[pm] + api[am]));
401   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
402   current_space = free_space;
403 
404   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
405   for (k=0; k<merge->nrecv; k++){
406     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
407     nrows       = *buf_ri_k[k];
408     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
409     nextci[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of k-th received i-structure */
410   }
411   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
412   rmax = 0;
413   for (i=0; i<pn; i++) {
414     /* add pdt[i,:]*AP into lnk */
415     nnz = 0;
416     pnz = pdti[i+1] - pdti[i];
417     ptJ = pdtj + pdti[i];
418     for (j=0; j<pnz; j++){
419       row  = ptJ[j];  /* row of AP == col of Pt */
420       apnz = api[row+1] - api[row];
421       Jptr = apj + api[row];
422       /* add non-zero cols of AP into the sorted linked list lnk */
423       ierr = PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
424       nnz += nlnk;
425     }
426     /* add received col data into lnk */
427     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
428       if (i == *nextrow[k]) { /* i-th row */
429         nzi = *(nextci[k]+1) - *nextci[k];
430         Jptr  = buf_rj[k] + *nextci[k];
431         ierr = PetscLLAddSorted(nzi,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
432         nnz += nlnk;
433         nextrow[k]++; nextci[k]++;
434       }
435     }
436 
437     /* if free space is not available, make more free space */
438     if (current_space->local_remaining<nnz) {
439       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
440       nspacedouble++;
441     }
442     /* copy data into free space, then initialize lnk */
443     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
444     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
445     current_space->array           += nnz;
446     current_space->local_used      += nnz;
447     current_space->local_remaining -= nnz;
448     bi[i+1] = bi[i] + nnz;
449     if (nnz > rmax) rmax = nnz;
450   }
451   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
452   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
453 
454   ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
455   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
456   afill_tmp = (PetscReal)bi[pn]/(pi_loc[pm] + api[am]+1);
457   if (afill_tmp > afill) afill = afill_tmp;
458   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
459 
460   /* create the symbolic parallel matrix Cmpi - why can it not be assembled in the numeric phase? - check runex55_SA */
461   /*------------------------------------------------------------------------------------------------------*/
462   ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
463   ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);
464 
465   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
466   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
467   ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,P->cmap->bs);CHKERRQ(ierr);
468   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
469   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
470   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
471 
472   for (i=0; i<pn; i++){
473     row = i + rstart;
474     nnz = bi[i+1] - bi[i];
475     Jptr = bj + bi[i];
476     ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
477   }
478   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
479   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
480   ierr = PetscFree(vals);CHKERRQ(ierr);
481 
482   merge->bi            = bi;
483   merge->bj            = bj;
484   merge->coi           = coi;
485   merge->coj           = coj;
486   merge->buf_ri        = buf_ri;
487   merge->buf_rj        = buf_rj;
488   merge->owners_co     = owners_co;
489   merge->destroy       = Cmpi->ops->destroy;
490   merge->duplicate     = Cmpi->ops->duplicate;
491 
492   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
493   /* Cmpi->assembled      = PETSC_FALSE; */
494   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
495   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
496 
497   /* attach the supporting struct to Cmpi for reuse */
498   c = (Mat_MPIAIJ*)Cmpi->data;
499   c->ptap        = ptap;
500   ptap->api      = api;
501   ptap->apj      = apj;
502   ptap->merge    = merge;
503   ptap->rmax     = ap_rmax;
504 
505   *C = Cmpi;
506 #if defined(PETSC_USE_INFO)
507   if (bi[pn] != 0) {
508     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
509     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
510   } else {
511     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
512   }
513 #endif
514   PetscFunctionReturn(0);
515 }
516 
517 #undef __FUNCT__
518 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
519 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
520 {
521   PetscErrorCode       ierr;
522   Mat_Merge_SeqsToMPI  *merge;
523   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
524   Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
525   Mat_SeqAIJ           *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
526   Mat_SeqAIJ           *p_loc,*p_oth;
527   Mat_PtAPMPI          *ptap;
528   PetscInt             *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
529   PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
530   PetscInt             i,j,k,anz,pnz,apnz,nextap,row,*cj;
531   MatScalar            *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
532   PetscInt             am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
533   MPI_Comm             comm=((PetscObject)C)->comm;
534   PetscMPIInt          size,rank,taga,*len_s;
535   PetscInt             *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
536   PetscInt             **buf_ri,**buf_rj;
537   PetscInt             cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
538   MPI_Request          *s_waits,*r_waits;
539   MPI_Status           *status;
540   MatScalar            **abuf_r,*ba_i,*pA,*coa,*ba;
541   PetscInt             *api,*apj,*coi,*coj;
542   PetscInt             *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
543   PetscBool            scalable=PETSC_TRUE;
544 
545   PetscFunctionBegin;
546   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
547   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
548 
549   ptap  = c->ptap;
550   if (!ptap) SETERRQ(((PetscObject)C)->comm,PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
551   merge = ptap->merge;
552 
553   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
554   /*--------------------------------------------------*/
555   if (ptap->reuse == MAT_INITIAL_MATRIX){
556     ptap->reuse = MAT_REUSE_MATRIX;
557   } else { /* update numerical values of P_oth and P_loc */
558     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
559     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
560   }
561 
562   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
563   /*--------------------------------------------------------------*/
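  /*
     For orientation (restating the step above): for each local row i of A the row
     AP[i,:] = Ad[i,:]*P_loc + Ao[i,:]*P_oth is formed first, and then the outer products
         Cd += Pd[i,:]^T * AP[i,:]   (rows of C owned by this process)
         Co += Po[i,:]^T * AP[i,:]   (rows of C owned by other processes, sent in step 3)
     are accumulated, where Pd = p->A and Po = p->B.
  */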
564   /* get data from symbolic products */
565   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
566   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
567   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
568   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
569 
570   coi = merge->coi; coj = merge->coj;
571   ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
572   ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);
573 
574   bi     = merge->bi; bj = merge->bj;
575   owners = merge->rowmap->range;
576   ierr   = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
577   ierr   = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
578 
579   api = ptap->api; apj = ptap->apj;
580 
581   /* the flag 'scalable' determines which implementation is used:
582        PETSC_FALSE: dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
583        PETSC_TRUE:  sparse axpy in MatPtAPNumeric() - may be slower, but uses a sparse array apa */
584   /* the default (scalable) may be overridden at run time; see the example below */
585   ierr = PetscOptionsGetBool(((PetscObject)C)->prefix,"-matptap_scalable",&scalable,PETSC_NULL);CHKERRQ(ierr);
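  /*
     Example (illustration only; assumes C carries no options prefix): the default above is the
     scalable sparse path, so the dense path is selected at run time with
         -matptap_scalable false
     or programmatically, before MatPtAP() is called, with something like
         ierr = PetscOptionsSetValue("-matptap_scalable","false");CHKERRQ(ierr);
     With a nonempty options prefix on C the option name gains that prefix.
  */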
586   if (!scalable){  /* Do dense axpy */
587     /*--------------------------------------------------*/
588     /* malloc apa to store dense row A[i,:]*P - nonscalable! */
589     ierr = PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
590     ierr = PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);
591 
592     for (i=0; i<am; i++) {
593       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
594       /*------------------------------------------------------------*/
595       apJ = apj + api[i];
596 
597       /* diagonal portion of A */
598       anz = adi[i+1] - adi[i];
599       adj = ad->j + adi[i];
600       ada = ad->a + adi[i];
601       for (j=0; j<anz; j++) {
602         row = adj[j];
603         pnz = pi_loc[row+1] - pi_loc[row];
604         pj  = pj_loc + pi_loc[row];
605         pa  = pa_loc + pi_loc[row];
606 
607         /* perform dense axpy */
608         valtmp = ada[j];
609         for (k=0; k<pnz; k++){
610           apa[pj[k]] += valtmp*pa[k];
611         }
612         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
613       }
614 
615       /* off-diagonal portion of A */
616       anz = aoi[i+1] - aoi[i];
617       aoj = ao->j + aoi[i];
618       aoa = ao->a + aoi[i];
619       for (j=0; j<anz; j++) {
620         row = aoj[j];
621         pnz = pi_oth[row+1] - pi_oth[row];
622         pj  = pj_oth + pi_oth[row];
623         pa  = pa_oth + pi_oth[row];
624 
625         /* perform dense axpy */
626         valtmp = aoa[j];
627         for (k=0; k<pnz; k++){
628           apa[pj[k]] += valtmp*pa[k];
629         }
630         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
631       }
632 
633       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
634       /*--------------------------------------------------------------*/
635       apnz = api[i+1] - api[i];
636       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
637       pnz = po->i[i+1] - po->i[i];
638       poJ = po->j + po->i[i];
639       pA  = po->a + po->i[i];
640       for (j=0; j<pnz; j++){
641         row = poJ[j];
642         cnz = coi[row+1] - coi[row];
643         cj  = coj + coi[row];
644         ca  = coa + coi[row];
645         /* perform dense axpy */
646         valtmp = pA[j];
647         for (k=0; k<cnz; k++) {
648           ca[k] += valtmp*apa[cj[k]];
649         }
650         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
651       }
652 
653       /* put the value into Cd (diagonal part) */
654       pnz = pd->i[i+1] - pd->i[i];
655       pdJ = pd->j + pd->i[i];
656       pA  = pd->a + pd->i[i];
657       for (j=0; j<pnz; j++){
658         row = pdJ[j];
659         cnz = bi[row+1] - bi[row];
660         cj  = bj + bi[row];
661         ca  = ba + bi[row];
662         /* perform dense axpy */
663         valtmp = pA[j];
664         for (k=0; k<cnz; k++) {
665           ca[k] += valtmp*apa[cj[k]];
666         }
667         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
668       }
669 
670       /* zero the current row of A*P */
671       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
672     }
673   } else {/* Perform sparse axpy */
674     /*----------------------------------------------------*/
675     /* malloc apa to store sparse row A[i,:]*P */
676     ierr = PetscMalloc((ptap->rmax+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
677     ierr = PetscMemzero(apa,ptap->rmax*sizeof(MatScalar));CHKERRQ(ierr);
678 
679     pA=pa_loc;
680     for (i=0; i<am; i++) {
681       /* form i-th sparse row of A*P */
682       apnz = api[i+1] - api[i];
683       apJ  = apj + api[i];
684       /* diagonal portion of A */
685       anz = adi[i+1] - adi[i];
686       adj = ad->j + adi[i];
687       ada = ad->a + adi[i];
688       for (j=0; j<anz; j++) {
689         row = adj[j];
690         pnz = pi_loc[row+1] - pi_loc[row];
691         pj  = pj_loc + pi_loc[row];
692         pa  = pa_loc + pi_loc[row];
693         valtmp = ada[j];
694         nextp  = 0;
695         for (k=0; nextp<pnz; k++) {
696           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
697             apa[k] += valtmp*pa[nextp++];
698           }
699         }
700         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
701       }
702       /* off-diagonal portion of A */
703       anz = aoi[i+1] - aoi[i];
704       aoj = ao->j + aoi[i];
705       aoa = ao->a + aoi[i];
706       for (j=0; j<anz; j++) {
707         row = aoj[j];
708         pnz = pi_oth[row+1] - pi_oth[row];
709         pj  = pj_oth + pi_oth[row];
710         pa  = pa_oth + pi_oth[row];
711         valtmp = aoa[j];
712         nextp  = 0;
713         for (k=0; nextp<pnz; k++) {
714           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
715             apa[k] += valtmp*pa[nextp++];
716           }
717         }
718         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
719       }
720 
721       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
722       /*--------------------------------------------------------------*/
723       pnz = pi_loc[i+1] - pi_loc[i];
724       pJ  = pj_loc + pi_loc[i];
725       for (j=0; j<pnz; j++) {
726         nextap = 0;
727         row    = pJ[j]; /* global index */
728         if (row < pcstart || row >=pcend) { /* put the value into Co */
729           row = *poJ;
730           cj  = coj + coi[row];
731           ca  = coa + coi[row]; poJ++;
732         } else {                            /* put the value into Cd */
733           row  = *pdJ;
734           cj   = bj + bi[row];
735           ca   = ba + bi[row]; pdJ++;
736         }
737         valtmp = pA[j];
738         for (k=0; nextap<apnz; k++) {
739           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
740         }
741         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
742       }
743       pA += pnz;
744       /* zero the current row info for A*P */
745       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
746     }
747   }
748   ierr = PetscFree(apa);CHKERRQ(ierr);
749 
750   /* 3) send and recv matrix values coa */
751   /*------------------------------------*/
752   buf_ri = merge->buf_ri;
753   buf_rj = merge->buf_rj;
754   len_s  = merge->len_s;
755   ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
756   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
757 
758   ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr);
759   for (proc=0,k=0; proc<size; proc++){
760     if (!len_s[proc]) continue;
761     i = merge->owners_co[proc];
762     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
763     k++;
764   }
765   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
766   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
767 
768   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
769   ierr = PetscFree(r_waits);CHKERRQ(ierr);
770   ierr = PetscFree(coa);CHKERRQ(ierr);
771 
772   /* 4) insert local Cseq and received values into Cmpi */
773   /*------------------------------------------------------*/
774   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
775   for (k=0; k<merge->nrecv; k++){
776     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
777     nrows       = *(buf_ri_k[k]);
778     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
779     nextci[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of k-th received i-structure */
780   }
781 
782   for (i=0; i<cm; i++) {
783     row = owners[rank] + i; /* global row index of C_seq */
784     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
785     ba_i = ba + bi[i];
786     bnz  = bi[i+1] - bi[i];
787     /* add received vals into ba */
788     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
789       /* i-th row */
790       if (i == *nextrow[k]) {
791         cnz = *(nextci[k]+1) - *nextci[k];
792         cj  = buf_rj[k] + *(nextci[k]);
793         ca  = abuf_r[k] + *(nextci[k]);
794         nextcj = 0;
795         for (j=0; nextcj<cnz; j++){
796           if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
797             ba_i[j] += ca[nextcj++];
798           }
799         }
800         nextrow[k]++; nextci[k]++;
801         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
802       }
803     }
804     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
805   }
806   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
807   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
808 
809   ierr = PetscFree(ba);CHKERRQ(ierr);
810   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
811   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
812   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
813 #if defined(PETSC_USE_INFO)
814   if (scalable){
815     ierr = PetscInfo(C,"Use scalable sparse axpy\n");CHKERRQ(ierr);
816   } else {
817     ierr = PetscInfo(C,"Use non-scalable dense axpy\n");CHKERRQ(ierr);
818   }
819 #endif
820   PetscFunctionReturn(0);
821 }
822