xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision a201590f66d78a0ee8e1bf4e6f9fcce39f32a809)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
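
/*
  The product is computed in two phases:
    MatPtAPSymbolic_MPIAIJ_MPIAIJ() - gathers the needed rows of P (P_loc, P_oth),
      computes the nonzero structure of AP = A_loc*P and of Co = (p->B)^T*AP,
      exchanges the off-process row structure, and creates the parallel matrix C
      with its nonzero pattern (the values are filled in by the numeric phase);
    MatPtAPNumeric_MPIAIJ_MPIAIJ()  - computes the numerical values, exchanges the
      off-process contributions (coa), and assembles C.
*/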
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 
12 /*
13 #define DEBUG_MATPTAP
14  */
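/* Move the #define above out of the surrounding comment to enable the MPI_Barrier()/
   PetscPrintf() progress messages guarded by DEBUG_MATPTAP in the symbolic routine below. */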
15 
16 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
17 #undef __FUNCT__
18 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
19 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
20 {
21   PetscErrorCode       ierr;
22   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data;
23   Mat_PtAPMPI          *ptap=a->ptap;
24 
25   PetscFunctionBegin;
26   if (ptap){
27     Mat_Merge_SeqsToMPI  *merge=ptap->merge;
28     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
29     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
32     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
33     if (ptap->api){ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
34     if (ptap->apj){ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
35     if (merge) {
36       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
37       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
38       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
39       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
40       ierr = PetscFree(merge->bj);CHKERRQ(ierr);
41       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
42       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
43       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
44       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
45       ierr = PetscFree(merge->coi);CHKERRQ(ierr);
46       ierr = PetscFree(merge->coj);CHKERRQ(ierr);
47       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
48       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
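      /* merge->destroy holds the MATMPIAIJ destroy routine that was saved in the symbolic
         phase before being overridden, so calling it frees the underlying Mat_MPIAIJ data */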
49       ierr = merge->destroy(A);CHKERRQ(ierr);
50       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
51     }
52     ierr = PetscFree(ptap);CHKERRQ(ierr);
53   }
54 
55   PetscFunctionReturn(0);
56 }
57 
58 #undef __FUNCT__
59 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
60 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
61 {
62   PetscErrorCode       ierr;
63   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data;
64   Mat_PtAPMPI          *ptap = a->ptap;
65   Mat_Merge_SeqsToMPI  *merge = ptap->merge;
66 
67   PetscFunctionBegin;
68   ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);
69   (*M)->ops->destroy   = merge->destroy;
70   (*M)->ops->duplicate = merge->duplicate;
71   PetscFunctionReturn(0);
72 }
73 
74 #undef __FUNCT__
75 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ"
76 PetscErrorCode MatPtAPSymbolic_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
77 {
78   PetscErrorCode ierr;
79 
80   PetscFunctionBegin;
81   if (!P->ops->ptapsymbolic_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
82   ierr = (*P->ops->ptapsymbolic_mpiaij)(A,P,fill,C);CHKERRQ(ierr);
83   PetscFunctionReturn(0);
84 }
85 
86 #undef __FUNCT__
87 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ"
88 PetscErrorCode MatPtAPNumeric_MPIAIJ(Mat A,Mat P,Mat C)
89 {
90   PetscErrorCode ierr;
91 
92   PetscFunctionBegin;
93   if (!P->ops->ptapnumeric_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
94   ierr = (*P->ops->ptapnumeric_mpiaij)(A,P,C);CHKERRQ(ierr);
95   PetscFunctionReturn(0);
96 }
97 
98 #undef __FUNCT__
99 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
100 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
101 {
102   PetscErrorCode       ierr;
103   Mat                  Cmpi;
104   Mat_PtAPMPI          *ptap;
105   PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
106   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
107   Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
108   Mat_SeqAIJ           *p_loc,*p_oth;
109   PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
110   PetscInt             *adi=ad->i,*aj,*aoi=ao->i,nnz;
111   PetscInt             nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
112   PetscInt             am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
113   PetscBT              lnkbt;
114   MPI_Comm             comm=((PetscObject)A)->comm;
115   PetscMPIInt          size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
116   PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
117   PetscInt             len,proc,*dnz,*onz,*owners;
118   PetscInt             nzi,*bi,*bj;
119   PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
120   MPI_Request          *swaits,*rwaits;
121   MPI_Status           *sstatus,rstatus;
122   Mat_Merge_SeqsToMPI  *merge;
123   PetscInt             *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
124   PetscReal            afill=1.0,afill_tmp;
125   PetscInt             rstart = P->cmap->rstart,rmax;
126   PetscScalar          *vals;
127 
128   PetscFunctionBegin;
129   /* check if matrix local sizes are compatible */
130   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend){
131     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
132   }
133   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend){
134     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
135   }
136 
137   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
138   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
139 
140   /* create struct Mat_PtAPMPI and attach it to C later */
141   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
142   ptap->reuse    = MAT_INITIAL_MATRIX;
143 
144 
145   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
146   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
147 #if defined(DEBUG_MATPTAP)
148    if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_oth is done...\n",rank);
149 #endif
150 
151   /* get P_loc by taking all local rows of P */
152   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
153 #if defined(DEBUG_MATPTAP)
154   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
155   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_loc is done...\n",rank);
156 #endif
157 
158   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
159   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
160   pi_loc = p_loc->i; pj_loc = p_loc->j;
161   pi_oth = p_oth->i; pj_oth = p_oth->j;
162 
163   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
164   /*-------------------------------------------------------------------*/
165   ierr = PetscMalloc((am+1)*sizeof(PetscInt),&api);CHKERRQ(ierr);
166   api[0] = 0;
167 
168   /* create and initialize a linked list */
169   nlnk = pN+1;
170 #if defined(DEBUG_MATPTAP)
171   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
172   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate(), pN %D, P_rmax %D %D; Annz %D\n",rank,pN,p_loc->rmax,p_oth->rmax,adi[am]+aoi[am]);
173 #endif
174   ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
175 #if defined(DEBUG_MATPTAP)
176   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
177   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call PetscLLCreate() is done\n",rank);
178 #endif
179 
180   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
181   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
182   current_space = free_space;
183 
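  /* nspacedouble counts how many times the free-space list has to be enlarged in the
     symbolic passes below; it is reported together with the achieved fill ratio via
     PetscInfo() at the end of this routine as a hint for choosing a better 'fill'. */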
184   for (i=0; i<am; i++) {
185     apnz = 0;
186     /* diagonal portion of A */
187     nzi = adi[i+1] - adi[i];
188     aj  = ad->j + adi[i];
189     for (j=0; j<nzi; j++){
190       row  = aj[j];
191       pnz  = pi_loc[row+1] - pi_loc[row];
192       Jptr = pj_loc + pi_loc[row];
193       /* add non-zero cols of P into the sorted linked list lnk */
194       ierr = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
195       apnz += nlnk;
196     }
197     /* off-diagonal portion of A */
198     nzi = aoi[i+1] - aoi[i];
199     aj  = ao->j + aoi[i];
200     for (j=0; j<nzi; j++){
201       row = aj[j];
202       pnz = pi_oth[row+1] - pi_oth[row];
203       Jptr  = pj_oth + pi_oth[row];
204       ierr = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
205       apnz += nlnk;
206     }
207 
208     api[i+1] = api[i] + apnz;
209     if (ap_rmax < apnz) ap_rmax = apnz;
210 
211     /* if free space is not available, double the total space in the list */
212     if (current_space->local_remaining<apnz) {
213       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
214       nspacedouble++;
215     }
216 
217     /* Copy data into free space, then initialize lnk */
218     ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
219     current_space->array           += apnz;
220     current_space->local_used      += apnz;
221     current_space->local_remaining -= apnz;
222   }
223   /* Allocate space for apj, initialize apj, and */
224   /* destroy list of free space and other temporary array(s) */
225   ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&apj);CHKERRQ(ierr);
226   ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
227   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
228   if (afill_tmp > afill) afill = afill_tmp;
229 
230   /* determine symbolic Co=(p->B)^T*AP - send to others */
231   /*----------------------------------------------------*/
232   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
233 
234   /* then, compute symbolic Co = (p->B)^T*AP */
235   pon = (p->B)->cmap->n; /* total number of rows of Co to be sent to other processes;
236                             >= (number of nonzero rows of C_seq) - pn */
237   ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
238   coi[0] = 0;
239 
240   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
241   nnz           = (PetscInt)(fill*(poti[pon] + api[am]));
242   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
243   current_space = free_space;
244 
245   for (i=0; i<pon; i++) {
246     nnz = 0;
247     pnz = poti[i+1] - poti[i];
248     ptJ = potj + poti[i];
249     for (j=0; j<pnz; j++){
250       row  = ptJ[j]; /* row of AP == col of Pot */
251       apnz = api[row+1] - api[row];
252       Jptr = apj + api[row];
253       /* add non-zero cols of AP into the sorted linked list lnk */
254       ierr = PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
255       nnz += nlnk;
256     }
257 
258     /* If free space is not available, double the total space in the list */
259     if (current_space->local_remaining<nnz) {
260       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
261       nspacedouble++;
262     }
263 
264     /* Copy data into free space, then initialize lnk */
265     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
266     current_space->array           += nnz;
267     current_space->local_used      += nnz;
268     current_space->local_remaining -= nnz;
269     coi[i+1] = coi[i] + nnz;
270   }
271   ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
272   ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
273   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
274   if (afill_tmp > afill) afill = afill_tmp;
275   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
276 
277 #if defined(DEBUG_MATPTAP)
278   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
279   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] local Co is done\n",rank);
280 #endif
281 
282   /* send j-array (coj) of Co to other processors */
283   /*----------------------------------------------*/
284   /* determine row ownership */
285   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
286   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
287   merge->rowmap->n = pn;
288   merge->rowmap->bs = 1;
289   ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
290   owners = merge->rowmap->range;
291 
292   /* determine the number of messages to send and their lengths */
293   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
294   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
295   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
296   len_s = merge->len_s;
297   merge->nsend = 0;
298 
299   ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
300   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
301 
302   proc = 0;
303   for (i=0; i<pon; i++){
304     while (prmap[i] >= owners[proc+1]) proc++;
305     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
306     len_s[proc] += coi[i+1] - coi[i];
307   }
308 
309   len   = 0;  /* total length of buf_s[], which holds all outgoing i-structure messages */
310   owners_co[0] = 0;
311   for (proc=0; proc<size; proc++){
312     owners_co[proc+1] = owners_co[proc] + len_si[proc];
313     if (len_si[proc]){
314       merge->nsend++;
315       len_si[proc] = 2*(len_si[proc] + 1);
316       len += len_si[proc];
317     }
318   }
319 
320   /* determine the number and length of messages to receive for coi and coj  */
321   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
322   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
323 
324   /* post the Irecv and Isend of coj */
325   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
326   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
327   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
328   for (proc=0, k=0; proc<size; proc++){
329     if (!len_s[proc]) continue;
330     i = owners_co[proc];
331     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
332     k++;
333   }
334 
335   /* wait until the receives and sends of coj are complete */
336   ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
337   for (i=0; i<merge->nrecv; i++){
338     PetscMPIInt icompleted;
339     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
340   }
341   ierr = PetscFree(rwaits);CHKERRQ(ierr);
342   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
343 
344   /* send and recv coi */
345   /*-------------------*/
346   ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
347   ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
348   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
349   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
350   for (proc=0,k=0; proc<size; proc++){
351     if (!len_s[proc]) continue;
352     /* form outgoing message for i-structure:
353          buf_si[0]:                 nrows to be sent
354                [1:nrows]:           row index (local to the receiving process)
355                [nrows+1:2*nrows+1]: i-structure index
356     */
357     /*-------------------------------------------*/
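    /* for example (with hypothetical values), a message carrying nrows=2 rows r0 and r1
       with 3 and 5 nonzeros respectively is laid out as
         buf_si = {2, r0, r1, 0, 3, 8}
       i.e. len_si[proc] = 2*(nrows+1) = 6 entries */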
358     nrows = len_si[proc]/2 - 1;
359     buf_si_i    = buf_si + nrows+1;
360     buf_si[0]   = nrows;
361     buf_si_i[0] = 0;
362     nrows = 0;
363     for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
364       nzi = coi[i+1] - coi[i];
365       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
366       buf_si[nrows+1] = prmap[i] - owners[proc]; /* local row index */
367       nrows++;
368     }
369     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
370     k++;
371     buf_si += len_si[proc];
372   }
373   i = merge->nrecv;
374   while (i--) {
375     PetscMPIInt icompleted;
376     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
377   }
378   ierr = PetscFree(rwaits);CHKERRQ(ierr);
379   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
380   /*
381   ierr = PetscInfo2(A,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
382   for (i=0; i<merge->nrecv; i++){
383     ierr = PetscInfo3(A,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
384   }
385   */
386   ierr = PetscFree(len_si);CHKERRQ(ierr);
387   ierr = PetscFree(len_ri);CHKERRQ(ierr);
388   ierr = PetscFree(swaits);CHKERRQ(ierr);
389   ierr = PetscFree(sstatus);CHKERRQ(ierr);
390   ierr = PetscFree(buf_s);CHKERRQ(ierr);
391 
392 #if defined(DEBUG_MATPTAP)
393   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
394   if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Co is done\n",rank);
395 #endif
396 
397   /* compute the local portion of C (mpi mat) */
398   /*------------------------------------------*/
399   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
400 
401   /* allocate bi array and free space for accumulating nonzero column info */
402   ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
403   bi[0] = 0;
404 
405   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
406   nnz           = (PetscInt)(fill*(pi_loc[pm] + api[am]));
407   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
408   current_space = free_space;
409 
410   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
411   for (k=0; k<merge->nrecv; k++){
412     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
413     nrows       = *buf_ri_k[k];
414     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
415     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of k-th received i-structure */
416   }
417   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
418   rmax = 0;
419   for (i=0; i<pn; i++) {
420     /* add pdt[i,:]*AP into lnk */
421     nnz = 0;
422     pnz = pdti[i+1] - pdti[i];
423     ptJ = pdtj + pdti[i];
424     for (j=0; j<pnz; j++){
425       row  = ptJ[j];  /* row of AP == col of Pt */
426       apnz = api[row+1] - api[row];
427       Jptr = apj + api[row];
428       /* add non-zero cols of AP into the sorted linked list lnk */
429       ierr = PetscLLAddSorted(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
430       nnz += nlnk;
431     }
432     /* add received col data into lnk */
433     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
434       if (i == *nextrow[k]) { /* i-th row */
435         nzi = *(nextci[k]+1) - *nextci[k];
436         Jptr  = buf_rj[k] + *nextci[k];
437         ierr = PetscLLAddSorted(nzi,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
438         nnz += nlnk;
439         nextrow[k]++; nextci[k]++;
440       }
441     }
442 
443     /* if free space is not available, make more free space */
444     if (current_space->local_remaining<nnz) {
445       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
446       nspacedouble++;
447     }
448     /* copy data into free space, then initialize lnk */
449     ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
450     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
451     current_space->array           += nnz;
452     current_space->local_used      += nnz;
453     current_space->local_remaining -= nnz;
454     bi[i+1] = bi[i] + nnz;
455     if (nnz > rmax) rmax = nnz;
456   }
457   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
458   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
459 
460   ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
461   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
462   afill_tmp = (PetscReal)bi[pn]/(pi_loc[pm] + api[am]+1);
463   if (afill_tmp > afill) afill = afill_tmp;
464   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
465 
466   /* create symbolic parallel matrix Cmpi - why can it not be assembled in the numeric part? - check runex55_SA */
467   /*------------------------------------------------------------------------------------------------------*/
468   ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
469   ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);
470 
471   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
472   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
473   ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,P->cmap->bs);CHKERRQ(ierr);
474   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
475   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
476   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
477 
478   for (i=0; i<pn; i++){
479     row = i + rstart;
480     nnz = bi[i+1] - bi[i];
481     Jptr = bj + bi[i];
482     ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
483   }
484   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
485   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
486   ierr = PetscFree(vals);CHKERRQ(ierr);
487 
488   merge->bi            = bi;
489   merge->bj            = bj;
490   merge->coi           = coi;
491   merge->coj           = coj;
492   merge->buf_ri        = buf_ri;
493   merge->buf_rj        = buf_rj;
494   merge->owners_co     = owners_co;
495   merge->destroy       = Cmpi->ops->destroy;
496   merge->duplicate     = Cmpi->ops->duplicate;
497 
498   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
499   /* Cmpi->assembled      = PETSC_FALSE; */
500   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
501   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
502 
503   /* attach the supporting struct to Cmpi for reuse */
504   c = (Mat_MPIAIJ*)Cmpi->data;
505   c->ptap        = ptap;
506   ptap->api      = api;
507   ptap->apj      = apj;
508   ptap->merge    = merge;
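  /* ap_rmax is the maximum number of nonzeros in a row of AP; the numeric routine uses
     it to size the sparse work array apa when -matptap_sparseaxpy 2 is selected */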
509   ptap->rmax     = ap_rmax;
510 
511   *C = Cmpi;
512 #if defined(PETSC_USE_INFO)
513   if (bi[pn] != 0) {
514     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
515     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
516   } else {
517     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
518   }
519 #endif
520   PetscFunctionReturn(0);
521 }
522 
523 #undef __FUNCT__
524 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
525 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
526 {
527   PetscErrorCode       ierr;
528   Mat_Merge_SeqsToMPI  *merge;
529   Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
530   Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
531   Mat_SeqAIJ           *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
532   Mat_SeqAIJ           *p_loc,*p_oth;
533   Mat_PtAPMPI          *ptap;
534   PetscInt             *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
535   PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
536   PetscInt             i,j,k,anz,pnz,apnz,nextap,row,*cj;
537   MatScalar            *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
538   PetscInt             am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
539   MPI_Comm             comm=((PetscObject)C)->comm;
540   PetscMPIInt          size,rank,taga,*len_s;
541   PetscInt             *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
542   PetscInt             **buf_ri,**buf_rj;
543   PetscInt             cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
544   MPI_Request          *s_waits,*r_waits;
545   MPI_Status           *status;
546   MatScalar            **abuf_r,*ba_i,*pA,*coa,*ba;
547   PetscInt             *api,*apj,*coi,*coj;
548   PetscInt             *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
549   PetscInt             sparse_axpy;
550 
551   PetscFunctionBegin;
552   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
553   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
554 
555   ptap  = c->ptap;
556   if (!ptap) SETERRQ(((PetscObject)C)->comm,PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
557   merge = ptap->merge;
558 
559   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
560   /*--------------------------------------------------*/
561   if (ptap->reuse == MAT_INITIAL_MATRIX){
562     ptap->reuse = MAT_REUSE_MATRIX;
563   } else { /* update numerical values of P_oth and P_loc */
564     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
565     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
566   }
567 
568   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
569   /*--------------------------------------------------------------*/
570   /* get data from symbolic products */
571   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
572   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
573   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
574   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
575 
576   coi = merge->coi; coj = merge->coj;
577   ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
578   ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);
579 
580   bi     = merge->bi; bj = merge->bj;
581   owners = merge->rowmap->range;
582   ierr   = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
583   ierr   = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
584 
585   api = ptap->api; apj = ptap->apj;
586   /* flag 'sparse_axpy' determines which implementation is used:
587        0: do dense axpy in MatPtAPNumeric() - fastest, but requires storage of a dense array apa; (default)
588        1: do one sparse axpy - uses the same memory as sparse_axpy=0 and may execute fewer flops
589           (apnz vs. cnz in the outer product), but is slower than case '0' when cnz is not much larger than apnz;
590        2: do two sparse axpys in MatPtAPNumeric() - slowest, but apa is only a sparse work array */
591   /* set default sparse_axpy */
592   sparse_axpy = 0;
593   ierr = PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);CHKERRQ(ierr);
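  /* the kernel can be chosen at run time, e.g. '-matptap_sparseaxpy 2' selects the fully
     sparse variant below; any value other than 0, 1 or 2 is rejected with an error */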
594   if (sparse_axpy == 0){  /* Do not perform sparse axpy */
595     /*--------------------------------------------------*/
596     /* malloc apa to store dense row A[i,:]*P */
597     ierr = PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
598     ierr = PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);
599 
600     for (i=0; i<am; i++) {
601       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
602       /*------------------------------------------------------------*/
603       apJ = apj + api[i];
604 
605       /* diagonal portion of A */
606       anz = adi[i+1] - adi[i];
607       adj = ad->j + adi[i];
608       ada = ad->a + adi[i];
609       for (j=0; j<anz; j++) {
610         row = adj[j];
611         pnz = pi_loc[row+1] - pi_loc[row];
612         pj  = pj_loc + pi_loc[row];
613         pa  = pa_loc + pi_loc[row];
614 
615         /* perform dense axpy */
616         valtmp = ada[j];
617         for (k=0; k<pnz; k++){
618           apa[pj[k]] += valtmp*pa[k];
619         }
620         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
621       }
622 
623       /* off-diagonal portion of A */
624       anz = aoi[i+1] - aoi[i];
625       aoj = ao->j + aoi[i];
626       aoa = ao->a + aoi[i];
627       for (j=0; j<anz; j++) {
628         row = aoj[j];
629         pnz = pi_oth[row+1] - pi_oth[row];
630         pj  = pj_oth + pi_oth[row];
631         pa  = pa_oth + pi_oth[row];
632 
633         /* perform dense axpy */
634         valtmp = aoa[j];
635         for (k=0; k<pnz; k++){
636           apa[pj[k]] += valtmp*pa[k];
637         }
638         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
639       }
640 
641       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
642       /*--------------------------------------------------------------*/
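      /* for each nonzero P(i,col) in row i of P: scale AP[i,:] by it and add the result to
         row 'col' of C; entries from P's off-diagonal block accumulate into Co (coa, sent to
         the owning process later), entries from the diagonal block into the local rows (ba) */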
643       apnz = api[i+1] - api[i];
644       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
645       pnz = po->i[i+1] - po->i[i];
646       poJ = po->j + po->i[i];
647       pA  = po->a + po->i[i];
648       for (j=0; j<pnz; j++){
649         row = poJ[j];
650         cnz = coi[row+1] - coi[row];
651         cj  = coj + coi[row];
652         ca  = coa + coi[row];
653         /* perform dense axpy */
654         valtmp = pA[j];
655         for (k=0; k<cnz; k++) {
656           ca[k] += valtmp*apa[cj[k]];
657         }
658         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
659       }
660 
661       /* put the value into Cd (diagonal part) */
662       pnz = pd->i[i+1] - pd->i[i];
663       pdJ = pd->j + pd->i[i];
664       pA  = pd->a + pd->i[i];
665       for (j=0; j<pnz; j++){
666         row = pdJ[j];
667         cnz = bi[row+1] - bi[row];
668         cj  = bj + bi[row];
669         ca  = ba + bi[row];
670         /* perform dense axpy */
671         valtmp = pA[j];
672         for (k=0; k<cnz; k++) {
673           ca[k] += valtmp*apa[cj[k]];
674         }
675         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
676       }
677 
678       /* zero the current row of A*P */
679       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
680     }
681   } else if (sparse_axpy == 1){  /* Perform one sparse axpy */
682     /*------------------------------------------------------*/
683     /* malloc apa to store dense row A[i,:]*P */
684     ierr = PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
685     ierr = PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);
686 
687     for (i=0; i<am; i++) {
688       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
689       /*------------------------------------------------------------*/
690       apJ = apj + api[i];
691 
692       /* diagonal portion of A */
693       anz = adi[i+1] - adi[i];
694       adj = ad->j + adi[i];
695       ada = ad->a + adi[i];
696       for (j=0; j<anz; j++) {
697         row = adj[j];
698         pnz = pi_loc[row+1] - pi_loc[row];
699         pj  = pj_loc + pi_loc[row];
700         pa  = pa_loc + pi_loc[row];
701 
702         /* perform dense axpy */
703         valtmp = ada[j];
704         for (k=0; k<pnz; k++){
705           apa[pj[k]] += valtmp*pa[k];
706         }
707         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
708       }
709 
710       /* off-diagonal portion of A */
711       anz = aoi[i+1] - aoi[i];
712       aoj = ao->j + aoi[i];
713       aoa = ao->a + aoi[i];
714       for (j=0; j<anz; j++) {
715         row = aoj[j];
716         pnz = pi_oth[row+1] - pi_oth[row];
717         pj  = pj_oth + pi_oth[row];
718         pa  = pa_oth + pi_oth[row];
719 
720         /* perform dense axpy */
721         valtmp = aoa[j];
722         for (k=0; k<pnz; k++){
723           apa[pj[k]] += valtmp*pa[k];
724         }
725         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
726       }
727 
728       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
729       /*--------------------------------------------------------------*/
730       apnz = api[i+1] - api[i];
731       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
732       pnz = po->i[i+1] - po->i[i];
733       poJ = po->j + po->i[i];
734       pA  = po->a + po->i[i];
735       for (j=0; j<pnz; j++){
736         row    = poJ[j];
737         cj     = coj + coi[row];
738         ca     = coa + coi[row];
739         valtmp = pA[j];
740         /* perform sparse axpy */
741         nextap = 0;
742         for (k=0; nextap<apnz; k++) {
743           if (cj[k]==apJ[nextap]) { /* global column index */
744             ca[k] += valtmp*apa[cj[k]]; nextap++;
745           }
746         }
747         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
748       }
749 
750       /* put the value into Cd (diagonal part) */
751       pnz = pd->i[i+1] - pd->i[i];
752       pdJ = pd->j + pd->i[i];
753       pA  = pd->a + pd->i[i];
754       for (j=0; j<pnz; j++){
755         row    = pdJ[j];
756         cj     = bj + bi[row];
757         ca     = ba + bi[row];
758         valtmp = pA[j];
759         /* perform sparse axpy */
760         nextap = 0;
761         for (k=0; nextap<apnz; k++) {
762           if (cj[k]==apJ[nextap]) { /* global column index */
763             ca[k] += valtmp*apa[cj[k]];
764             nextap++;
765           }
766         }
767         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
768       }
769 
770       /* zero the current row of A*P */
771       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
772     }
773   } else if (sparse_axpy == 2){/* Perform two sparse axpy */
774     /*----------------------------------------------------*/
775     /* malloc apa to store sparse row A[i,:]*P */
776     ierr = PetscMalloc((ptap->rmax+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
777     ierr = PetscMemzero(apa,ptap->rmax*sizeof(MatScalar));CHKERRQ(ierr);
778 
779     pA=pa_loc;
780     for (i=0; i<am; i++) {
781       /* form i-th sparse row of A*P */
782       apnz = api[i+1] - api[i];
783       apJ  = apj + api[i];
784       /* diagonal portion of A */
785       anz = adi[i+1] - adi[i];
786       adj = ad->j + adi[i];
787       ada = ad->a + adi[i];
788       for (j=0; j<anz; j++) {
789         row = adj[j];
790         pnz = pi_loc[row+1] - pi_loc[row];
791         pj  = pj_loc + pi_loc[row];
792         pa  = pa_loc + pi_loc[row];
793         valtmp = ada[j];
794         nextp  = 0;
795         for (k=0; nextp<pnz; k++) {
796           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
797             apa[k] += valtmp*pa[nextp++];
798           }
799         }
800         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
801       }
802       /* off-diagonal portion of A */
803       anz = aoi[i+1] - aoi[i];
804       aoj = ao->j + aoi[i];
805       aoa = ao->a + aoi[i];
806       for (j=0; j<anz; j++) {
807         row = aoj[j];
808         pnz = pi_oth[row+1] - pi_oth[row];
809         pj  = pj_oth + pi_oth[row];
810         pa  = pa_oth + pi_oth[row];
811         valtmp = aoa[j];
812         nextp  = 0;
813         for (k=0; nextp<pnz; k++) {
814           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
815             apa[k] += valtmp*pa[nextp++];
816           }
817         }
818         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
819       }
820 
821       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
822       /*--------------------------------------------------------------*/
823       pnz = pi_loc[i+1] - pi_loc[i];
824       pJ  = pj_loc + pi_loc[i];
825       for (j=0; j<pnz; j++) {
826         nextap = 0;
827         row    = pJ[j]; /* global index */
828         if (row < pcstart || row >=pcend) { /* put the value into Co */
829           row = *poJ;
830           cj  = coj + coi[row];
831           ca  = coa + coi[row]; poJ++;
832         } else {                            /* put the value into Cd */
833           row  = *pdJ;
834           cj   = bj + bi[row];
835           ca   = ba + bi[row]; pdJ++;
836         }
837         valtmp = pA[j];
838         for (k=0; nextap<apnz; k++) {
839           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
840         }
841         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
842       }
843       pA += pnz;
844       /* zero the current row info for A*P */
845       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
846     }
847   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"sparse_axpy only takes values 0, 1 and 2");
848   ierr = PetscFree(apa);CHKERRQ(ierr);
849 
850   /* 3) send and recv matrix values coa */
851   /*------------------------------------*/
852   buf_ri = merge->buf_ri;
853   buf_rj = merge->buf_rj;
854   len_s  = merge->len_s;
855   ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
856   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
857 
858   ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr);
859   for (proc=0,k=0; proc<size; proc++){
860     if (!len_s[proc]) continue;
861     i = merge->owners_co[proc];
862     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
863     k++;
864   }
865   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
866   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
867 
868   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
869   ierr = PetscFree(r_waits);CHKERRQ(ierr);
870   ierr = PetscFree(coa);CHKERRQ(ierr);
871 
872   /* 4) insert local Cseq and received values into Cmpi */
873   /*------------------------------------------------------*/
874   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
875   for (k=0; k<merge->nrecv; k++){
876     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
877     nrows       = *(buf_ri_k[k]);
878     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
879     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of k-th received i-structure */
880   }
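  /* merge each locally owned row of C: start from the locally computed values (ba), add the
     matching entries received from other processes (abuf_r), then insert the row into C */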
881 
882   for (i=0; i<cm; i++) {
883     row = owners[rank] + i; /* global row index of C_seq */
884     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
885     ba_i = ba + bi[i];
886     bnz  = bi[i+1] - bi[i];
887     /* add received vals into ba */
888     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
889       /* i-th row */
890       if (i == *nextrow[k]) {
891         cnz = *(nextci[k]+1) - *nextci[k];
892         cj  = buf_rj[k] + *(nextci[k]);
893         ca  = abuf_r[k] + *(nextci[k]);
894         nextcj = 0;
895         for (j=0; nextcj<cnz; j++){
896           if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
897             ba_i[j] += ca[nextcj++];
898           }
899         }
900         nextrow[k]++; nextci[k]++;
901         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
902       }
903     }
904     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
905   }
906   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
907   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
908 
909   ierr = PetscFree(ba);CHKERRQ(ierr);
910   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
911   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
912   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
913   PetscFunctionReturn(0);
914 }
915