xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision 047240e14af00aad1ef65e96f6fface8924f7f7e)
1 
2 /*
3   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4           C = A * B
5 */
6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
7 #include <../src/mat/utils/freespace.h>
8 #include <../src/mat/impls/aij/mpi/mpiaij.h>
9 #include <petscbt.h>
10 #include <../src/mat/impls/dense/mpi/mpidense.h>
11 
12 #undef __FUNCT__
13 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
14 PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
15 {
16   PetscErrorCode ierr;
17 
18   PetscFunctionBegin;
19   if (scall == MAT_INITIAL_MATRIX) {
20     ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
21   }
22 
23   ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
24   PetscFunctionReturn(0);
25 }
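
/*
   A minimal usage sketch (an illustration, not taken from this file; assumes A and B
   are assembled MATMPIAIJ matrices with compatible layouts):

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ... change numerical values of A and/or B, keeping their nonzero patterns ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/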
26 
27 #undef __FUNCT__
28 #define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
29 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
30 {
31   PetscErrorCode ierr;
32   Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
33   Mat_PtAPMPI    *ptap = a->ptap;
34 
35   PetscFunctionBegin;
36   ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
37   ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
38   ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
39   ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
40   ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
41   ierr = PetscFree(ptap->api);CHKERRQ(ierr);
42   ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
43   ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
44   ierr = ptap->destroy(A);CHKERRQ(ierr);
45   ierr = PetscFree(ptap);CHKERRQ(ierr);
46   PetscFunctionReturn(0);
47 }
48 
49 #undef __FUNCT__
50 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
51 PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
52 {
53   PetscErrorCode ierr;
54   Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
55   Mat_PtAPMPI    *ptap = a->ptap;
56 
57   PetscFunctionBegin;
58   ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
59 
60   (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! */
61   (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
62   PetscFunctionReturn(0);
63 }
64 
65 #undef __FUNCT__
66 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
67 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
68 {
69   PetscErrorCode ierr;
70   Mat_MPIAIJ     *a  =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
71   Mat_SeqAIJ     *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
72   Mat_SeqAIJ     *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
73   PetscInt       *adi=ad->i,*adj,*aoi=ao->i,*aoj;
74   PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
75   Mat_SeqAIJ     *p_loc,*p_oth;
76   PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
77   PetscScalar    *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca;
78   PetscInt       cm   =C->rmap->n,anz,pnz;
79   Mat_PtAPMPI    *ptap=c->ptap;
80   PetscInt       *api,*apj,*apJ,i,j,k,row;
81   PetscInt       cstart=C->cmap->rstart;
82   PetscInt       cdnz,conz,k0,k1;
83 
84   PetscFunctionBegin;
85   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
86   /*-----------------------------------------------------*/
87   /* update numerical values of P_oth and P_loc */
88   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
89   ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
90 
91   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
92   /*----------------------------------------------------------*/
93   /* get data from symbolic products */
94   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
95   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
96   pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
97   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
98 
99   /* get apa for storing dense row A[i,:]*P */
100   apa = ptap->apa;
101 
102   api = ptap->api;
103   apj = ptap->apj;
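  /* For each local row i: accumulate Ad[i,:]*P_loc + Ao[i,:]*P_oth into the dense
     scratch array apa (length P->cmap->N), then scatter the api[i+1]-api[i] nonzeros
     back into the off-diagonal and diagonal blocks of C and reset those entries of
     apa to zero for the next row */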
104   for (i=0; i<cm; i++) {
105     /* diagonal portion of A */
106     anz = adi[i+1] - adi[i];
107     adj = ad->j + adi[i];
108     ada = ad->a + adi[i];
109     for (j=0; j<anz; j++) {
110       row = adj[j];
111       pnz = pi_loc[row+1] - pi_loc[row];
112       pj  = pj_loc + pi_loc[row];
113       pa  = pa_loc + pi_loc[row];
114 
115       /* perform dense axpy */
116       valtmp = ada[j];
117       for (k=0; k<pnz; k++) {
118         apa[pj[k]] += valtmp*pa[k];
119       }
120       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
121     }
122 
123     /* off-diagonal portion of A */
124     anz = aoi[i+1] - aoi[i];
125     aoj = ao->j + aoi[i];
126     aoa = ao->a + aoi[i];
127     for (j=0; j<anz; j++) {
128       row = aoj[j];
129       pnz = pi_oth[row+1] - pi_oth[row];
130       pj  = pj_oth + pi_oth[row];
131       pa  = pa_oth + pi_oth[row];
132 
133       /* perform dense axpy */
134       valtmp = aoa[j];
135       for (k=0; k<pnz; k++) {
136         apa[pj[k]] += valtmp*pa[k];
137       }
138       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
139     }
140 
141     /* set values in C */
142     apJ  = apj + api[i];
143     cdnz = cd->i[i+1] - cd->i[i];
144     conz = co->i[i+1] - co->i[i];
145 
146     /* 1st off-diagonal part of C */
147     ca = coa + co->i[i];
148     k  = 0;
149     for (k0=0; k0<conz; k0++) {
150       if (apJ[k] >= cstart) break;
151       ca[k0]      = apa[apJ[k]];
152       apa[apJ[k]] = 0.0;
153       k++;
154     }
155 
156     /* diagonal part of C */
157     ca = cda + cd->i[i];
158     for (k1=0; k1<cdnz; k1++) {
159       ca[k1]      = apa[apJ[k]];
160       apa[apJ[k]] = 0.0;
161       k++;
162     }
163 
164     /* 2nd off-diagonal part of C */
165     ca = coa + co->i[i];
166     for (; k0<conz; k0++) {
167       ca[k0]      = apa[apJ[k]];
168       apa[apJ[k]] = 0.0;
169       k++;
170     }
171   }
172   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
173   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
174   PetscFunctionReturn(0);
175 }
176 
177 #undef __FUNCT__
178 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
179 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
180 {
181   PetscErrorCode     ierr;
182   MPI_Comm           comm=((PetscObject)A)->comm;
183   Mat                Cmpi;
184   Mat_PtAPMPI        *ptap;
185   PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
186   Mat_MPIAIJ         *a        =(Mat_MPIAIJ*)A->data,*c;
187   Mat_SeqAIJ         *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
188   PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
189   PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
190   PetscInt           *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
191   PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
192   PetscBT            lnkbt;
193   PetscScalar        *apa;
194   PetscReal          afill;
195   PetscBool          scalable=PETSC_TRUE;
196   PetscInt           nlnk_max,armax,prmax;
197 
198   PetscFunctionBegin;
199   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
200     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
201   }
202 
203   ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
204   ierr = PetscOptionsBool("-matmatmult_scalable","Use a scalable but slower C=A*B","",scalable,&scalable,PETSC_NULL);CHKERRQ(ierr);
205   ierr = PetscOptionsEnd();CHKERRQ(ierr);
206   if (scalable) {
207     ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(A,P,fill,C);CHKERRQ(ierr);
208     PetscFunctionReturn(0);
209   }
210 
211   /* create struct Mat_PtAPMPI and attach it to C later */
212   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
213 
214   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
215   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
216 
217   /* get P_loc by taking all local rows of P */
218   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
219 
220   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
221   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
222   pi_loc = p_loc->i; pj_loc = p_loc->j;
223   pi_oth = p_oth->i; pj_oth = p_oth->j;
224 
225   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
226   /*-------------------------------------------------------------------*/
227   ierr      = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
228   ptap->api = api;
229   api[0]    = 0;
230 
231   /* create and initialize a linked list */
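  /* nlnk_max = (max nonzeros in a row of A) * (max nonzeros in a row of P_loc/P_oth)
     bounds the number of nonzeros in any row of A*P; it is capped at pN, the global
     number of columns of P */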
232   armax    = ad->rmax+ao->rmax;
233   prmax    = PetscMax(p_loc->rmax,p_oth->rmax);
234   nlnk_max = armax*prmax;
235   if (!nlnk_max || nlnk_max > pN) nlnk_max = pN;
236   ierr = PetscLLCondensedCreate(nlnk_max,pN,&lnk,&lnkbt);CHKERRQ(ierr);
237 
238   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
239   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
240 
241   current_space = free_space;
242 
243   ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
244   for (i=0; i<am; i++) {
245     /* diagonal portion of A */
246     nzi = adi[i+1] - adi[i];
247     for (j=0; j<nzi; j++) {
248       row  = *adj++;
249       pnz  = pi_loc[row+1] - pi_loc[row];
250       Jptr = pj_loc + pi_loc[row];
251       /* add non-zero cols of P into the sorted linked list lnk */
252       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
253     }
254     /* off-diagonal portion of A */
255     nzi = aoi[i+1] - aoi[i];
256     for (j=0; j<nzi; j++) {
257       row  = *aoj++;
258       pnz  = pi_oth[row+1] - pi_oth[row];
259       Jptr = pj_oth + pi_oth[row];
260       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
261     }
262 
263     apnz     = lnk[0];
264     api[i+1] = api[i] + apnz;
265 
266     /* if free space is not available, double the total space in the list */
267     if (current_space->local_remaining<apnz) {
268       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
269       nspacedouble++;
270     }
271 
272     /* Copy data into free space, then initialize lnk */
273     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
274     ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);
275 
276     current_space->array           += apnz;
277     current_space->local_used      += apnz;
278     current_space->local_remaining -= apnz;
279   }
280 
281   /* Allocate space for apj, initialize apj, and */
282   /* destroy list of free space and other temporary array(s) */
283   ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
284   apj  = ptap->apj;
285   ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
286   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
287 
288   /* malloc apa to store dense row A[i,:]*P */
289   ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
290   ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr);
291 
292   ptap->apa = apa;
293 
294   /* create and assemble symbolic parallel matrix Cmpi */
295   /*----------------------------------------------------*/
296   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
297   ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
298   ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr);
299 
300   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
301   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
302   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
303   for (i=0; i<am; i++) {
304     row  = i + rstart;
305     apnz = api[i+1] - api[i];
306     ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
307     apj += apnz;
308   }
309   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
310   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
311 
312   ptap->destroy        = Cmpi->ops->destroy;
313   ptap->duplicate      = Cmpi->ops->duplicate;
314   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
315   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
316 
317   /* attach the supporting struct to Cmpi for reuse */
318   c       = (Mat_MPIAIJ*)Cmpi->data;
319   c->ptap = ptap;
320 
321   *C = Cmpi;
322 
323   /* set MatInfo */
324   afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
325   if (afill < 1.0) afill = 1.0;
326   Cmpi->info.mallocs           = nspacedouble;
327   Cmpi->info.fill_ratio_given  = fill;
328   Cmpi->info.fill_ratio_needed = afill;
329 
330 #if defined(PETSC_USE_INFO)
331   if (api[am]) {
332     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
332     ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
334   } else {
335     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
336   }
337 #endif
338   PetscFunctionReturn(0);
339 }
340 
341 #undef __FUNCT__
342 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
343 PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
344 {
345   PetscErrorCode ierr;
346 
347   PetscFunctionBegin;
348   if (scall == MAT_INITIAL_MATRIX) {
349     ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
350   }
351   ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
352   PetscFunctionReturn(0);
353 }
354 
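/* Context attached to the product matrix C for C = A*B with A MPIAIJ and B MPIDENSE:
   workB holds the off-process rows of B needed by this rank, and the remaining fields
   are persistent send/receive buffers reused by each numeric product */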
355 typedef struct {
356   Mat         workB;
357   PetscScalar *rvalues,*svalues;
358   MPI_Request *rwaits,*swaits;
359 } MPIAIJ_MPIDense;
360 
361 #undef __FUNCT__
362 #define __FUNCT__ "MatMPIAIJ_MPIDenseDestroy"
363 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
364 {
365   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
366   PetscErrorCode  ierr;
367 
368   PetscFunctionBegin;
369   ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
370   ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
371   ierr = PetscFree(contents);CHKERRQ(ierr);
372   PetscFunctionReturn(0);
373 }
374 
375 #undef __FUNCT__
376 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
377 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
378 {
379   PetscErrorCode         ierr;
380   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
381   PetscInt               nz   = aij->B->cmap->n;
382   PetscContainer         container;
383   MPIAIJ_MPIDense        *contents;
384   VecScatter             ctx   = aij->Mvctx;
385   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
386   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
387   PetscInt               m     = A->rmap->n,n=B->cmap->n;
388 
389   PetscFunctionBegin;
390   ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
391   ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
392   ierr = MatSetBlockSizes(*C,A->rmap->bs,B->cmap->bs);CHKERRQ(ierr);
393   ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
394   ierr = MatMPIDenseSetPreallocation(*C,PETSC_NULL);CHKERRQ(ierr);
395   ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
396   ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
397 
398   (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
399 
400   ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
401   /* Create work matrix used to store the off-process rows of B needed for the local product */
402   ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
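  /* workB is nz x B->cmap->N, where nz = aij->B->cmap->n is the number of off-process
     rows of B this rank needs (one per nonzero column of the off-diagonal block of A) */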
403   /* Create work arrays needed */
404   ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
405                       B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
406                       from->n,MPI_Request,&contents->rwaits,
407                       to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);
408 
409   ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
410   ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
411   ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
412   ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
413   ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
414   PetscFunctionReturn(0);
415 }
416 
417 #undef __FUNCT__
418 #define __FUNCT__ "MatMPIDenseScatter"
419 /*
420     Performs an efficient scatter on the rows of B needed by this process; this is
421     a modification of the VecScatterBegin_() routines.
422 */
423 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
424 {
425   Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
426   PetscErrorCode         ierr;
427   PetscScalar            *b,*w,*svalues,*rvalues;
428   VecScatter             ctx   = aij->Mvctx;
429   VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
430   VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
431   PetscInt               i,j,k;
432   PetscInt               *sindices,*sstarts,*rindices,*rstarts;
433   PetscMPIInt            *sprocs,*rprocs,nrecvs;
434   MPI_Request            *swaits,*rwaits;
435   MPI_Comm               comm = ((PetscObject)A)->comm;
436   PetscMPIInt            tag  = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
437   MPI_Status             status;
438   MPIAIJ_MPIDense        *contents;
439   PetscContainer         container;
440   Mat                    workB;
441 
442   PetscFunctionBegin;
443   ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
444   if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
445   ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);
446 
447   workB = *outworkB = contents->workB;
448   if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
449   sindices = to->indices;
450   sstarts  = to->starts;
451   sprocs   = to->procs;
452   swaits   = contents->swaits;
453   svalues  = contents->svalues;
454 
455   rindices = from->indices;
456   rstarts  = from->starts;
457   rprocs   = from->procs;
458   rwaits   = contents->rwaits;
459   rvalues  = contents->rvalues;
460 
461   ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr);
462   ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr);
463 
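  /* Post receives for the rows of B owned by other ranks; each message carries ncols
     (= B->cmap->N) scalars per row.  The sends below pack from B's column-major
     (MatDense) storage, and the receives are unpacked into workB's column-major storage. */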
464   for (i=0; i<from->n; i++) {
465     ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
466   }
467 
468   for (i=0; i<to->n; i++) {
469     /* pack a message at a time */
470     CHKMEMQ;
471     for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
472       for (k=0; k<ncols; k++) {
473         svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
474       }
475     }
476     CHKMEMQ;
477     ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
478   }
479 
480   nrecvs = from->n;
481   while (nrecvs) {
482     ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
483     nrecvs--;
484     /* unpack a message at a time */
485     CHKMEMQ;
486     for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
487       for (k=0; k<ncols; k++) {
488         w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
489       }
490     }
491     CHKMEMQ;
492   }
493   if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}
494 
495   ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr);
496   ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr);
497   ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
498   ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
499   PetscFunctionReturn(0);
500 }
501 extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);
502 
503 #undef __FUNCT__
504 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
505 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
506 {
507   PetscErrorCode ierr;
508   Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
509   Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
510   Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
511   Mat            workB;
512 
513   PetscFunctionBegin;
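  /* C_loc = A_diag*B_loc + A_off*B_oth, where B_oth are the off-process rows of B
     gathered into workB by MatMPIDenseScatter() */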
514   /* diagonal block of A times all local rows of B */
515   ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);
516 
517   /* get the off-process parts of B needed to complete the product */
518   ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);
519 
520   /* off-diagonal block of A times nonlocal rows of B */
521   ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
522   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
523   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
524   PetscFunctionReturn(0);
525 }
526 
527 #undef __FUNCT__
528 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable"
529 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,Mat C)
530 {
531   PetscErrorCode ierr;
532   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
533   Mat_SeqAIJ     *ad  = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
534   Mat_SeqAIJ     *cd  = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
535   PetscInt       *adi = ad->i,*adj,*aoi=ao->i,*aoj;
536   PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
537   Mat_SeqAIJ     *p_loc,*p_oth;
538   PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
539   PetscScalar    *pa_loc,*pa_oth,*pa,valtmp,*ca;
540   PetscInt       cm          = C->rmap->n,anz,pnz;
541   Mat_PtAPMPI    *ptap       = c->ptap;
542   PetscScalar    *apa_sparse = ptap->apa;
543   PetscInt       *api,*apj,*apJ,i,j,k,row;
544   PetscInt       cstart = C->cmap->rstart;
545   PetscInt       cdnz,conz,k0,k1,nextp;
546 
547   PetscFunctionBegin;
548   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
549   /*-----------------------------------------------------*/
550   /* update numerical values of P_oth and P_loc */
551   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
552   ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
553 
554   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
555   /*----------------------------------------------------------*/
556   /* get data from symbolic products */
557   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
558   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
559   pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
560   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
561 
562   api = ptap->api;
563   apj = ptap->apj;
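  /* apa_sparse holds only the nonzeros of one row of A*P, indexed by position within
     that row's column list apJ (not by global column), unlike the dense pN-length apa
     used by the non-scalable numeric routine */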
564   for (i=0; i<cm; i++) {
565     apJ = apj + api[i];
566 
567     /* diagonal portion of A */
568     anz = adi[i+1] - adi[i];
569     adj = ad->j + adi[i];
570     ada = ad->a + adi[i];
571     for (j=0; j<anz; j++) {
572       row = adj[j];
573       pnz = pi_loc[row+1] - pi_loc[row];
574       pj  = pj_loc + pi_loc[row];
575       pa  = pa_loc + pi_loc[row];
576       /* perform sparse axpy */
577       valtmp = ada[j];
578       nextp  = 0;
579       for (k=0; nextp<pnz; k++) {
580         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
581           apa_sparse[k] += valtmp*pa[nextp++];
582         }
583       }
584       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
585     }
586 
587     /* off-diagonal portion of A */
588     anz = aoi[i+1] - aoi[i];
589     aoj = ao->j + aoi[i];
590     aoa = ao->a + aoi[i];
591     for (j=0; j<anz; j++) {
592       row = aoj[j];
593       pnz = pi_oth[row+1] - pi_oth[row];
594       pj  = pj_oth + pi_oth[row];
595       pa  = pa_oth + pi_oth[row];
596       /* perform sparse axpy */
597       valtmp = aoa[j];
598       nextp  = 0;
599       for (k=0; nextp<pnz; k++) {
600         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
601           apa_sparse[k] += valtmp*pa[nextp++];
602         }
603       }
604       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
605     }
606 
607     /* set values in C */
608     cdnz = cd->i[i+1] - cd->i[i];
609     conz = co->i[i+1] - co->i[i];
610 
611     /* 1st off-diagonal part of C */
612     ca = coa + co->i[i];
613     k  = 0;
614     for (k0=0; k0<conz; k0++) {
615       if (apJ[k] >= cstart) break;
616       ca[k0]        = apa_sparse[k];
617       apa_sparse[k] = 0.0;
618       k++;
619     }
620 
621     /* diagonal part of C */
622     ca = cda + cd->i[i];
623     for (k1=0; k1<cdnz; k1++) {
624       ca[k1]        = apa_sparse[k];
625       apa_sparse[k] = 0.0;
626       k++;
627     }
628 
629     /* 2nd off-diagonal part of C */
630     ca = coa + co->i[i];
631     for (; k0<conz; k0++) {
632       ca[k0]        = apa_sparse[k];
633       apa_sparse[k] = 0.0;
634       k++;
635     }
636   }
637   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
638   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
639   PetscFunctionReturn(0);
640 }
641 
642 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ(), except it uses the scalable PetscLLCondensed routines to avoid the O(pN) memory requirement (pN = global number of columns of B) */
643 #undef __FUNCT__
644 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable"
645 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,PetscReal fill,Mat *C)
646 {
647   PetscErrorCode     ierr;
648   MPI_Comm           comm=((PetscObject)A)->comm;
649   Mat                Cmpi;
650   Mat_PtAPMPI        *ptap;
651   PetscFreeSpaceList free_space = PETSC_NULL,current_space=PETSC_NULL;
652   Mat_MPIAIJ         *a         = (Mat_MPIAIJ*)A->data,*c;
653   Mat_SeqAIJ         *ad        = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
654   PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
655   PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
656   PetscInt           i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
657   PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
658   PetscInt           nlnk_max,armax,prmax;
659   PetscReal          afill;
660   PetscScalar        *apa;
661 
662   PetscFunctionBegin;
663   /* create struct Mat_PtAPMPI and attach it to C later */
664   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
665 
666   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
667   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
668 
669   /* get P_loc by taking all local rows of P */
670   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
671 
672   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
673   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
674   pi_loc = p_loc->i; pj_loc = p_loc->j;
675   pi_oth = p_oth->i; pj_oth = p_oth->j;
676 
677   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
678   /*-------------------------------------------------------------------*/
679   ierr      = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
680   ptap->api = api;
681   api[0]    = 0;
682 
683   /* create and initialize a linked list */
684   armax    = ad->rmax+ao->rmax;
685   prmax    = PetscMax(p_loc->rmax,p_oth->rmax);
686   nlnk_max = armax*prmax;
687   if (!nlnk_max || nlnk_max > pN) nlnk_max = pN;
688   ierr = PetscLLCondensedCreate_Scalable(nlnk_max,&lnk);CHKERRQ(ierr);
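  /* the scalable condensed list allocates O(nlnk_max) storage and no PetscBT of length
     pN, unlike PetscLLCondensedCreate() used in the non-scalable symbolic routine */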
689 
690   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
691   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
692 
693   current_space = free_space;
694 
695   ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
696   for (i=0; i<am; i++) {
697     /* diagonal portion of A */
698     nzi = adi[i+1] - adi[i];
699     for (j=0; j<nzi; j++) {
700       row  = *adj++;
701       pnz  = pi_loc[row+1] - pi_loc[row];
702       Jptr = pj_loc + pi_loc[row];
703       /* add non-zero cols of P into the sorted linked list lnk */
704       ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
705     }
706     /* off-diagonal portion of A */
707     nzi = aoi[i+1] - aoi[i];
708     for (j=0; j<nzi; j++) {
709       row  = *aoj++;
710       pnz  = pi_oth[row+1] - pi_oth[row];
711       Jptr = pj_oth + pi_oth[row];
712       ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
713     }
714 
715     apnz     = *lnk;
716     api[i+1] = api[i] + apnz;
717     if (apnz > apnz_max) apnz_max = apnz;
718 
719     /* if free space is not available, double the total space in the list */
720     if (current_space->local_remaining<apnz) {
721       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
722       nspacedouble++;
723     }
724 
725     /* Copy data into free space, then initialize lnk */
726     ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
727     ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);
728 
729     current_space->array           += apnz;
730     current_space->local_used      += apnz;
731     current_space->local_remaining -= apnz;
732   }
733 
734   /* Allocate space for apj, initialize apj, and */
735   /* destroy list of free space and other temporary array(s) */
736   ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
737   apj  = ptap->apj;
738   ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
739   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
740 
741   /* create and assemble symbolic parallel matrix Cmpi */
742   /*----------------------------------------------------*/
743   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
744   ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
745   ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr);
746   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
747   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
748   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
749 
750   /* malloc apa for assembling Cmpi */
751   ierr = PetscMalloc(apnz_max*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
752   ierr = PetscMemzero(apa,apnz_max*sizeof(PetscScalar));CHKERRQ(ierr);
753 
754   ptap->apa = apa;
755   for (i=0; i<am; i++) {
756     row  = i + rstart;
757     apnz = api[i+1] - api[i];
758     ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
759     apj += apnz;
760   }
761   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
762   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
763 
764   ptap->destroy             = Cmpi->ops->destroy;
765   ptap->duplicate           = Cmpi->ops->duplicate;
766   Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
767   Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
768   Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;
769 
770   /* attach the supporting struct to Cmpi for reuse */
771   c       = (Mat_MPIAIJ*)Cmpi->data;
772   c->ptap = ptap;
773 
774   *C = Cmpi;
775 
776   /* set MatInfo */
777   afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
778   if (afill < 1.0) afill = 1.0;
779   Cmpi->info.mallocs           = nspacedouble;
780   Cmpi->info.fill_ratio_given  = fill;
781   Cmpi->info.fill_ratio_needed = afill;
782 
783 #if defined(PETSC_USE_INFO)
784   if (api[am]) {
785     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
786     ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
787   } else {
788     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
789   }
790 #endif
791   PetscFunctionReturn(0);
792 }
793 
794 /*-------------------------------------------------------------------------*/
795 #undef __FUNCT__
796 #define __FUNCT__ "MatTransposeMatMult_MPIAIJ_MPIAIJ"
797 PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C)
798 {
799   PetscErrorCode ierr;
800   PetscBool      scalable=PETSC_TRUE,viamatmatmult=PETSC_FALSE;
801 
802   PetscFunctionBegin;
803   ierr = PetscOptionsBool("-mattransposematmult_viamatmatmult","Use R=Pt and C=R*A","",viamatmatmult,&viamatmatmult,PETSC_NULL);CHKERRQ(ierr);
804   if (viamatmatmult) {
805     Mat         Pt;
806     Mat_PtAPMPI *ptap;
807     Mat_MPIAIJ  *c;
808     if (scall == MAT_INITIAL_MATRIX) {
809       ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr);
810       ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr);
811 
812       c        = (Mat_MPIAIJ*)(*C)->data;
813       ptap     = c->ptap;
814       ptap->Pt = Pt;
815     } else if (scall == MAT_REUSE_MATRIX) {
816       c    = (Mat_MPIAIJ*)(*C)->data;
817       ptap = c->ptap;
818       Pt   = ptap->Pt;
819       ierr = MatTranspose(P,scall,&Pt);CHKERRQ(ierr);
820       ierr = MatMatMult(Pt,A,scall,fill,C);CHKERRQ(ierr);
821     } else SETERRQ(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONGSTATE,"Not supported");
822     PetscFunctionReturn(0);
823   }
824 
825   if (scall == MAT_INITIAL_MATRIX) {
826     ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
827     ierr = PetscOptionsBool("-mattransposematmult_scalable","Use a scalable but slower C=Pt*A","",scalable,&scalable,PETSC_NULL);CHKERRQ(ierr);
828     if  (scalable) {
829       ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(P,A,fill,C);CHKERRQ(ierr);
830     } else {
831       ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
832     }
833     ierr = PetscOptionsEnd();CHKERRQ(ierr);
834   }
835   ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr);
836   PetscFunctionReturn(0);
837 }
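
/*
   A minimal usage sketch (an illustration, not taken from this file; assumes P and A
   are assembled MATMPIAIJ matrices with matching row layouts):

     Mat C;
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);

   Run-time options registered above: -mattransposematmult_scalable selects the
   memory-scalable symbolic phase; -mattransposematmult_viamatmatmult forms C by
   explicitly transposing P and calling MatMatMult().
*/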
838 
839 #undef __FUNCT__
840 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ"
841 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
842 {
843   PetscErrorCode      ierr;
844   Mat_Merge_SeqsToMPI *merge;
845   Mat_MPIAIJ          *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
846   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
847   Mat_PtAPMPI         *ptap;
848   PetscInt            *adj,*aJ;
849   PetscInt            i,j,k,anz,pnz,row,*cj;
850   MatScalar           *ada,*aval,*ca,valtmp;
851   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
852   MPI_Comm            comm=((PetscObject)C)->comm;
853   PetscMPIInt         size,rank,taga,*len_s;
854   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
855   PetscInt            **buf_ri,**buf_rj;
856   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
857   MPI_Request         *s_waits,*r_waits;
858   MPI_Status          *status;
859   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
860   PetscInt            *ai,*aj,*coi,*coj;
861   PetscInt            *poJ,*pdJ;
862   Mat                 A_loc;
863   Mat_SeqAIJ          *a_loc;
864 
865   PetscFunctionBegin;
866   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
867   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
868 
869   ptap  = c->ptap;
870   merge = ptap->merge;
871 
872   /* 2) compute numeric C_seq = P_loc^T*A_loc - dominating part */
873   /*--------------------------------------------------------------*/
874   /* get data from symbolic products */
875   coi  = merge->coi; coj = merge->coj;
876   ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
877   ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);
878 
879   bi     = merge->bi; bj = merge->bj;
880   owners = merge->rowmap->range;
881   ierr   = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
882   ierr   = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
883 
884   /* get A_loc by taking all local rows of A */
885   A_loc = ptap->A_loc;
886   ierr  = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
887   a_loc = (Mat_SeqAIJ*)(A_loc)->data;
888   ai    = a_loc->i;
889   aj    = a_loc->j;
890 
891   ierr = PetscMalloc((A->cmap->N)*sizeof(PetscScalar),&aval);CHKERRQ(ierr); /* non-scalable!!! */
892   ierr = PetscMemzero(aval,A->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);
893 
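  /* For each local row i of A: scatter A[i,:] into the dense array aval, then for every
     nonzero P[i,j] add P[i,j]*A[i,:] to row j of the product, accumulating into coa for
     off-process (to-be-sent) rows and into ba for locally owned rows; finally clear the
     entries of aval that were set */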
894   for (i=0; i<am; i++) {
895     /* 2-a) put A[i,:] to dense array aval */
896     anz = ai[i+1] - ai[i];
897     adj = aj + ai[i];
898     ada = a_loc->a + ai[i];
899     for (j=0; j<anz; j++) {
900       aval[adj[j]] = ada[j];
901     }
902 
903     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
904     /*--------------------------------------------------------------*/
905     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
906     pnz = po->i[i+1] - po->i[i];
907     poJ = po->j + po->i[i];
908     pA  = po->a + po->i[i];
909     for (j=0; j<pnz; j++) {
910       row = poJ[j];
911       cnz = coi[row+1] - coi[row];
912       cj  = coj + coi[row];
913       ca  = coa + coi[row];
914       /* perform dense axpy */
915       valtmp = pA[j];
916       for (k=0; k<cnz; k++) {
917         ca[k] += valtmp*aval[cj[k]];
918       }
919       ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
920     }
921 
922     /* put the value into Cd (diagonal part) */
923     pnz = pd->i[i+1] - pd->i[i];
924     pdJ = pd->j + pd->i[i];
925     pA  = pd->a + pd->i[i];
926     for (j=0; j<pnz; j++) {
927       row = pdJ[j];
928       cnz = bi[row+1] - bi[row];
929       cj  = bj + bi[row];
930       ca  = ba + bi[row];
931       /* perform dense axpy */
932       valtmp = pA[j];
933       for (k=0; k<cnz; k++) {
934         ca[k] += valtmp*aval[cj[k]];
935       }
936       ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
937     }
938 
939     /* zero out the entries of A[i,:] that were scattered into aval */
940     aJ = aj + ai[i];
941     for (k=0; k<anz; k++) aval[aJ[k]] = 0.0;
942   }
943 
944   /* 3) send and recv matrix values coa */
945   /*------------------------------------*/
946   buf_ri = merge->buf_ri;
947   buf_rj = merge->buf_rj;
948   len_s  = merge->len_s;
949   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
950   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
951 
952   ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr);
953   for (proc=0,k=0; proc<size; proc++) {
954     if (!len_s[proc]) continue;
955     i    = merge->owners_co[proc];
956     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
957     k++;
958   }
959   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
960   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
961 
962   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
963   ierr = PetscFree(r_waits);CHKERRQ(ierr);
964   ierr = PetscFree(coa);CHKERRQ(ierr);
965 
966   /* 4) insert local Cseq and received values into Cmpi */
967   /*----------------------------------------------------*/
968   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
969   for (k=0; k<merge->nrecv; k++) {
970     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
971     nrows       = *(buf_ri_k[k]);
972     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
973     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
974   }
975 
976   for (i=0; i<cm; i++) {
977     row  = owners[rank] + i; /* global row index of C_seq */
978     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
979     ba_i = ba + bi[i];
980     bnz  = bi[i+1] - bi[i];
981     /* add received vals into ba */
982     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
983       /* i-th row */
984       if (i == *nextrow[k]) {
985         cnz    = *(nextci[k]+1) - *nextci[k];
986         cj     = buf_rj[k] + *(nextci[k]);
987         ca     = abuf_r[k] + *(nextci[k]);
988         nextcj = 0;
989         for (j=0; nextcj<cnz; j++) {
990           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
991             ba_i[j] += ca[nextcj++];
992           }
993         }
994         nextrow[k]++; nextci[k]++;
995         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
996       }
997     }
998     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
999   }
1000   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1001   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1002 
1003   ierr = PetscFree(ba);CHKERRQ(ierr);
1004   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1005   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1006   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1007   ierr = PetscFree(aval);CHKERRQ(ierr);
1008   PetscFunctionReturn(0);
1009 }
1010 
1011 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1012 #undef __FUNCT__
1013 #define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ"
1014 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C)
1015 {
1016   PetscErrorCode      ierr;
1017   Mat                 Cmpi,A_loc,POt,PDt;
1018   Mat_PtAPMPI         *ptap;
1019   PetscFreeSpaceList  free_space=PETSC_NULL,current_space=PETSC_NULL;
1020   Mat_MPIAIJ          *p        =(Mat_MPIAIJ*)P->data,*c;
1021   PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
1022   PetscInt            nnz;
1023   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1024   PetscInt            am=A->rmap->n,pn=P->cmap->n;
1025   PetscBT             lnkbt;
1026   MPI_Comm            comm=((PetscObject)A)->comm;
1027   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1028   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
1029   PetscInt            len,proc,*dnz,*onz,*owners;
1030   PetscInt            nzi,*bi,*bj;
1031   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1032   MPI_Request         *swaits,*rwaits;
1033   MPI_Status          *sstatus,rstatus;
1034   Mat_Merge_SeqsToMPI *merge;
1035   PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1036   PetscReal           afill  =1.0,afill_tmp;
1037   PetscInt            rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Crmax;
1038   PetscScalar         *vals;
1039   Mat_SeqAIJ          *a_loc, *pdt,*pot;
1040 
1041   PetscFunctionBegin;
1042   /* check if matrix local sizes are compatible */
1043   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
1044     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1045   }
1046 
1047   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1048   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1049 
1050   /* create struct Mat_PtAPMPI and attach it to C later */
1051   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
1052 
1053   /* get A_loc by taking all local rows of A */
1054   ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
1055 
1056   ptap->A_loc = A_loc;
1057 
1058   a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1059   ai    = a_loc->i;
1060   aj    = a_loc->j;
1061 
1062   /* determine symbolic Co=(p->B)^T*A - send to others */
1063   /*----------------------------------------------------*/
1064   ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
1065   pdt  = (Mat_SeqAIJ*)PDt->data;
1066   pdti = pdt->i; pdtj = pdt->j;
1067 
1068   ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
1069   pot  = (Mat_SeqAIJ*)POt->data;
1070   poti = pot->i; potj = pot->j;
1071 
1072   /* then, compute symbolic Co = (p->B)^T*A */
1073   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors >= (num of nonzero rows of C_seq) - pn */
1074   ierr   = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
1075   coi[0] = 0;
1076 
1077   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1078   nnz           = fill*(poti[pon] + ai[am]);
1079   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
1080   current_space = free_space;
1081 
1082   /* create and initialize a linked list */
1083   i     = PetscMax(pdt->rmax,pot->rmax);
1084   Crmax = i*a_loc->rmax*size;
1085   if (!Crmax || Crmax > aN) Crmax = aN;
1086   ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr);
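  /* Crmax is a crude upper bound on the number of nonzeros in any row of P^T*A
     (max nonzeros per column of P times max nonzeros per row of A_loc, times the
     number of ranks), capped at aN, the global number of columns of A */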
1087 
1088   for (i=0; i<pon; i++) {
1089     pnz = poti[i+1] - poti[i];
1090     ptJ = potj + poti[i];
1091     for (j=0; j<pnz; j++) {
1092       row  = ptJ[j]; /* row of A_loc == col of Pot */
1093       anz  = ai[row+1] - ai[row];
1094       Jptr = aj + ai[row];
1095       /* add non-zero cols of AP into the sorted linked list lnk */
1096       ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
1097     }
1098     nnz = lnk[0];
1099 
1100     /* If free space is not available, double the total space in the list */
1101     if (current_space->local_remaining<nnz) {
1102       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
1103       nspacedouble++;
1104     }
1105 
1106     /* Copy data into free space, then initialize lnk */
1107     ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
1108 
1109     current_space->array           += nnz;
1110     current_space->local_used      += nnz;
1111     current_space->local_remaining -= nnz;
1112 
1113     coi[i+1] = coi[i] + nnz;
1114   }
1115 
1116   ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
1117   ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
1118 
1119   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1120   if (afill_tmp > afill) afill = afill_tmp;
1121 
1122   /* send j-array (coj) of Co to other processors */
1123   /*----------------------------------------------*/
1124   /* determine row ownership */
1125   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
1126   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
1127 
1128   merge->rowmap->n  = pn;
1129   merge->rowmap->bs = 1;
1130 
1131   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
1132   owners = merge->rowmap->range;
1133 
1134   /* determine the number of messages to send, their lengths */
1135   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
1136   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
1137   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
1138 
1139   len_s        = merge->len_s;
1140   merge->nsend = 0;
1141 
1142   ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
1143   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
1144 
1145   proc = 0;
1146   for (i=0; i<pon; i++) {
1147     while (prmap[i] >= owners[proc+1]) proc++;
1148     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
1149     len_s[proc] += coi[i+1] - coi[i];
1150   }
1151 
1152   len          = 0; /* max length of buf_si[] */
1153   owners_co[0] = 0;
1154   for (proc=0; proc<size; proc++) {
1155     owners_co[proc+1] = owners_co[proc] + len_si[proc];
1156     if (len_si[proc]) {
1157       merge->nsend++;
1158       len_si[proc] = 2*(len_si[proc] + 1);
1159       len         += len_si[proc];
1160     }
1161   }
1162 
1163   /* determine the number and length of messages to receive for coi and coj  */
1164   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
1165   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
1166 
1167   /* post the Irecv and Isend of coj */
1168   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
1169   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
1170   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
1171   for (proc=0, k=0; proc<size; proc++) {
1172     if (!len_s[proc]) continue;
1173     i    = owners_co[proc];
1174     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
1175     k++;
1176   }
1177 
1178   /* receives and sends of coj are complete */
1179   ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
1180   for (i=0; i<merge->nrecv; i++) {
1181     PetscMPIInt icompleted;
1182     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
1183   }
1184   ierr = PetscFree(rwaits);CHKERRQ(ierr);
1185   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
1186 
1187   /* send and recv coi */
1188   /*-------------------*/
1189   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
1190   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
1191   ierr   = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
1192   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
1193   for (proc=0,k=0; proc<size; proc++) {
1194     if (!len_s[proc]) continue;
1195     /* form outgoing message for i-structure:
1196          buf_si[0]:                 nrows to be sent
1197                [1:nrows]:           row index (global)
1198                [nrows+1:2*nrows+1]: i-structure index
1199     */
1200     /*-------------------------------------------*/
1201     nrows       = len_si[proc]/2 - 1;
1202     buf_si_i    = buf_si + nrows+1;
1203     buf_si[0]   = nrows;
1204     buf_si_i[0] = 0;
1205     nrows       = 0;
1206     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1207       nzi               = coi[i+1] - coi[i];
1208       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
1209       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
1210       nrows++;
1211     }
1212     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
1213     k++;
1214     buf_si += len_si[proc];
1215   }
1216   i = merge->nrecv;
1217   while (i--) {
1218     PetscMPIInt icompleted;
1219     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
1220   }
1221   ierr = PetscFree(rwaits);CHKERRQ(ierr);
1222   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
1223   ierr = PetscFree(len_si);CHKERRQ(ierr);
1224   ierr = PetscFree(len_ri);CHKERRQ(ierr);
1225   ierr = PetscFree(swaits);CHKERRQ(ierr);
1226   ierr = PetscFree(sstatus);CHKERRQ(ierr);
1227   ierr = PetscFree(buf_s);CHKERRQ(ierr);
1228 
1229   /* compute the local portion of C (mpi mat) */
1230   /*------------------------------------------*/
1231   /* allocate bi array and free space for accumulating nonzero column info */
1232   ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
1233   bi[0] = 0;
1234 
1235   /* set initial free space to be fill*(nnz(P) + nnz(A)) */
1236   nnz           = fill*(pdti[pn] + poti[pon] + ai[am]);
1237   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
1238   current_space = free_space;
1239 
1240   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
1241   for (k=0; k<merge->nrecv; k++) {
1242     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1243     nrows       = *buf_ri_k[k];
1244     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
1245     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1246   }
1247 
1248   ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
1249   rmax = 0;
1250   for (i=0; i<pn; i++) {
1251     /* add pdt[i,:]*AP into lnk */
1252     pnz = pdti[i+1] - pdti[i];
1253     ptJ = pdtj + pdti[i];
1254     for (j=0; j<pnz; j++) {
1255       row  = ptJ[j];  /* row of AP == col of Pt */
1256       anz  = ai[row+1] - ai[row];
1257       Jptr = aj + ai[row];
1258       /* add non-zero cols of AP into the sorted linked list lnk */
1259       ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
1260     }
1261 
1262     /* add received col data into lnk */
1263     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1264       if (i == *nextrow[k]) { /* i-th row */
1265         nzi  = *(nextci[k]+1) - *nextci[k];
1266         Jptr = buf_rj[k] + *nextci[k];
1267         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
1268         nextrow[k]++; nextci[k]++;
1269       }
1270     }
1271     nnz = lnk[0];
1272 
1273     /* if free space is not available, make more free space */
1274     if (current_space->local_remaining<nnz) {
1275       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
1276       nspacedouble++;
1277     }
1278     /* copy data into free space, then initialize lnk */
1279     ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
1280     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
1281 
1282     current_space->array           += nnz;
1283     current_space->local_used      += nnz;
1284     current_space->local_remaining -= nnz;
1285 
1286     bi[i+1] = bi[i] + nnz;
1287     if (nnz > rmax) rmax = nnz;
1288   }
1289   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1290 
1291   ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
1292   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
1293 
1294   afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
1295   if (afill_tmp > afill) afill = afill_tmp;
1296   ierr = PetscLLCondensedDestroy(lnk,lnkbt);CHKERRQ(ierr);
1297   ierr = MatDestroy(&POt);CHKERRQ(ierr);
1298   ierr = MatDestroy(&PDt);CHKERRQ(ierr);
1299 
1300   /* create symbolic parallel matrix Cmpi - why can this not be assembled in the Numeric part? */
1301   /*----------------------------------------------------------------------------------*/
1302   ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1303   ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);
1304 
1305   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
1306   ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
1307   ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr);
1308   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
1309   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
1310   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
1311   ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
1312   for (i=0; i<pn; i++) {
1313     row  = i + rstart;
1314     nnz  = bi[i+1] - bi[i];
1315     Jptr = bj + bi[i];
1316     ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
1317   }
1318   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1319   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1320   ierr = PetscFree(vals);CHKERRQ(ierr);
1321 
1322   merge->bi        = bi;
1323   merge->bj        = bj;
1324   merge->coi       = coi;
1325   merge->coj       = coj;
1326   merge->buf_ri    = buf_ri;
1327   merge->buf_rj    = buf_rj;
1328   merge->owners_co = owners_co;
1329   merge->destroy   = Cmpi->ops->destroy;
1330   merge->duplicate = Cmpi->ops->duplicate;
1331 
1332   Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
1333   Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;
1334 
1335   /* attach the supporting struct to Cmpi for reuse */
1336   c           = (Mat_MPIAIJ*)Cmpi->data;
1337   c->ptap     = ptap;
1338   ptap->api   = PETSC_NULL;
1339   ptap->apj   = PETSC_NULL;
1340   ptap->merge = merge;
1341   ptap->rmax  = rmax;
1342 
1343   *C = Cmpi;
1344 #if defined(PETSC_USE_INFO)
1345   if (bi[pn] != 0) {
1346     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
1347     ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
1348   } else {
1349     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
1350   }
1351 #endif
1352   PetscFunctionReturn(0);
1353 }
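/* Illustrative usage (a sketch, not taken from this file): the symbolic/numeric pair above is
   normally reached through the public interface, e.g.

     Mat C;
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);          // symbolic + numeric, C = P^T*A
     ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);  // numeric only, reuses ptap/merge

   where 2.0 is the caller's estimate of the 'fill' parameter consumed by the symbolic phase. */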
1354 
1355 #undef __FUNCT__
1356 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable"
1357 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,Mat C)
1358 {
1359   PetscErrorCode      ierr;
1360   Mat_Merge_SeqsToMPI *merge;
1361   Mat_MPIAIJ          *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1362   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1363   Mat_PtAPMPI         *ptap;
1364   PetscInt            *adj;
1365   PetscInt            i,j,k,anz,pnz,row,*cj,nexta;
1366   MatScalar           *ada,*ca,valtmp;
1367   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1368   MPI_Comm            comm=((PetscObject)C)->comm;
1369   PetscMPIInt         size,rank,taga,*len_s;
1370   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1371   PetscInt            **buf_ri,**buf_rj;
1372   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
1373   MPI_Request         *s_waits,*r_waits;
1374   MPI_Status          *status;
1375   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
1376   PetscInt            *ai,*aj,*coi,*coj;
1377   PetscInt            *poJ,*pdJ;
1378   Mat                 A_loc;
1379   Mat_SeqAIJ          *a_loc;
1380 
1381   PetscFunctionBegin;
1382   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1383   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1384 
1385   ptap  = c->ptap;
1386   merge = ptap->merge;
1387 
1388   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1389   /*------------------------------------------*/
1390   /* get data from symbolic products */
1391   coi    = merge->coi; coj = merge->coj;
1392   ierr   = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
1393   ierr   = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);
1394   bi     = merge->bi; bj = merge->bj;
1395   owners = merge->rowmap->range;
1396   ierr   = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
1397   ierr   = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
1398 
1399   /* get A_loc by taking all local rows of A */
1400   A_loc = ptap->A_loc;
1401   ierr  = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
1402   a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1403   ai    = a_loc->i;
1404   aj    = a_loc->j;
1405 
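  /* Outer-product accumulation: each nonzero P[i,j] of the local rows of P adds P[i,j]*A_loc[i,:]
     into row j of C.  Entries from the diagonal block pd go into locally owned rows (ba), while
     entries from the off-diagonal block po go into rows owned by other processes (coa), to be
     communicated in step 3.  Each scatter below is a sparse axpy that walks two sorted column
     lists (the symbolic pattern cj[] and the A_loc row adj[]) in step. */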
1406   for (i=0; i<am; i++) {
1407     anz = ai[i+1] - ai[i];
1408     adj = aj + ai[i];
1409     ada = a_loc->a + ai[i];
1410 
1411     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1412     /*-------------------------------------------------------------*/
1413     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1414     pnz = po->i[i+1] - po->i[i];
1415     poJ = po->j + po->i[i];
1416     pA  = po->a + po->i[i];
1417     for (j=0; j<pnz; j++) {
1418       row = poJ[j];
1419       cj  = coj + coi[row];
1420       ca  = coa + coi[row];
1421       /* perform sparse axpy */
1422       nexta  = 0;
1423       valtmp = pA[j];
1424       for (k=0; nexta<anz; k++) {
1425         if (cj[k] == adj[nexta]) {
1426           ca[k] += valtmp*ada[nexta];
1427           nexta++;
1428         }
1429       }
1430       ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
1431     }
1432 
1433     /* put the value into Cd (diagonal part) */
1434     pnz = pd->i[i+1] - pd->i[i];
1435     pdJ = pd->j + pd->i[i];
1436     pA  = pd->a + pd->i[i];
1437     for (j=0; j<pnz; j++) {
1438       row = pdJ[j];
1439       cj  = bj + bi[row];
1440       ca  = ba + bi[row];
1441       /* perform sparse axpy */
1442       nexta  = 0;
1443       valtmp = pA[j];
1444       for (k=0; nexta<anz; k++) {
1445         if (cj[k] == adj[nexta]) {
1446           ca[k] += valtmp*ada[nexta];
1447           nexta++;
1448         }
1449       }
1450       ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
1451     }
1452   }
1453 
1454   /* 3) send and recv matrix values coa */
1455   /*------------------------------------*/
1456   buf_ri = merge->buf_ri;
1457   buf_rj = merge->buf_rj;
1458   len_s  = merge->len_s;
1459   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
1460   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
1461 
1462   ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr);
1463   for (proc=0,k=0; proc<size; proc++) {
1464     if (!len_s[proc]) continue;
1465     i    = merge->owners_co[proc];
1466     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
1467     k++;
1468   }
1469   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
1470   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
1471 
1472   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
1473   ierr = PetscFree(r_waits);CHKERRQ(ierr);
1474   ierr = PetscFree(coa);CHKERRQ(ierr);
1475 
1476   /* 4) insert local Cseq and received values into Cmpi */
1477   /*----------------------------------------------------*/
1478   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
1479   for (k=0; k<merge->nrecv; k++) {
1480     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1481     nrows       = *(buf_ri_k[k]);
1482     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
1483     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1484   }
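  /* Each received i-structure buf_ri[k] is laid out as
       [nrows, row_0 .. row_{nrows-1}, off_0 .. off_{nrows}],
     where off_* are prefix offsets into buf_rj[k]/abuf_r[k]; nextrow[k] and nextci[k] advance
     through the row indices and offsets as the owned rows are merged below. */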
1485 
1486   for (i=0; i<cm; i++) {
1487     row  = owners[rank] + i; /* global row index of C_seq */
1488     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1489     ba_i = ba + bi[i];
1490     bnz  = bi[i+1] - bi[i];
1491     /* add received vals into ba */
1492     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1493       /* i-th row */
1494       if (i == *nextrow[k]) {
1495         cnz    = *(nextci[k]+1) - *nextci[k];
1496         cj     = buf_rj[k] + *(nextci[k]);
1497         ca     = abuf_r[k] + *(nextci[k]);
1498         nextcj = 0;
1499         for (j=0; nextcj<cnz; j++) {
1500           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1501             ba_i[j] += ca[nextcj++];
1502           }
1503         }
1504         nextrow[k]++; nextci[k]++;
1505         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1506       }
1507     }
1508     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
1509   }
1510   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1511   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1512 
1513   ierr = PetscFree(ba);CHKERRQ(ierr);
1514   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1515   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1516   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1517   PetscFunctionReturn(0);
1518 }
1519 
1520 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1521 #undef __FUNCT__
1522 #define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable"
1523 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,PetscReal fill,Mat *C)
1524 {
1525   PetscErrorCode      ierr;
1526   Mat                 Cmpi,A_loc,POt,PDt;
1527   Mat_PtAPMPI         *ptap;
1528   PetscFreeSpaceList  free_space=PETSC_NULL,current_space=PETSC_NULL;
1529   Mat_MPIAIJ          *p        =(Mat_MPIAIJ*)P->data,*c;
1530   PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
1531   PetscInt            nnz;
1532   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1533   PetscInt            am  =A->rmap->n,pn=P->cmap->n;
1534   MPI_Comm            comm=((PetscObject)A)->comm;
1535   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1536   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
1537   PetscInt            len,proc,*dnz,*onz,*owners;
1538   PetscInt            nzi,*bi,*bj;
1539   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1540   MPI_Request         *swaits,*rwaits;
1541   MPI_Status          *sstatus,rstatus;
1542   Mat_Merge_SeqsToMPI *merge;
1543   PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1544   PetscReal           afill  =1.0,afill_tmp;
1545   PetscInt            rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Crmax;
1546   PetscScalar         *vals;
1547   Mat_SeqAIJ          *a_loc, *pdt,*pot;
1548 
1549   PetscFunctionBegin;
1550   /* check if matrix local sizes are compatible */
1551   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
1552     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1553   }
1554 
1555   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1556   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1557 
1558   /* create struct Mat_PtAPMPI and attach it to C later */
1559   ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
1560 
1561   /* get A_loc by taking all local rows of A */
1562   ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
1563 
1564   ptap->A_loc = A_loc;
1565   a_loc       = (Mat_SeqAIJ*)(A_loc)->data;
1566   ai          = a_loc->i;
1567   aj          = a_loc->j;
1568 
1569   /* determine symbolic Co=(p->B)^T*A - send to others */
1570   /*----------------------------------------------------*/
1571   ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
1572   pdt  = (Mat_SeqAIJ*)PDt->data;
1573   pdti = pdt->i; pdtj = pdt->j;
1574 
1575   ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
1576   pot  = (Mat_SeqAIJ*)POt->data;
1577   poti = pot->i; potj = pot->j;
1578 
1579   /* then, compute symbolic Co = (p->B)^T*A */
1580   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1581                          >= (num of nonzero rows of C_seq) - pn */
1582   ierr   = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
1583   coi[0] = 0;
1584 
1585   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1586   nnz           = fill*(poti[pon] + ai[am]);
1587   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
1588   current_space = free_space;
1589 
1590   /* create and initialize a linked list */
1591   i     = PetscMax(pdt->rmax,pot->rmax);
1592   Crmax = i*a_loc->rmax*size; /* non-scalable! */
1593   if (!Crmax || Crmax > aN) Crmax = aN;
1594   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
1595 
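  /* Symbolic accumulation for Co: for each row i of Co (a column of p->B), the column indices of
     the A_loc rows selected by the nonzeros of row i of POt are merged into the sorted condensed
     linked list lnk (sized by Crmax above); after the merge, lnk[0] holds the number of distinct
     columns, i.e. the row length of Co. */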
1596   for (i=0; i<pon; i++) {
1597     pnz = poti[i+1] - poti[i];
1598     ptJ = potj + poti[i];
1599     for (j=0; j<pnz; j++) {
1600       row  = ptJ[j]; /* row of A_loc == col of Pot */
1601       anz  = ai[row+1] - ai[row];
1602       Jptr = aj + ai[row];
1603       /* add non-zero cols of AP into the sorted linked list lnk */
1604       ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
1605     }
1606     nnz = lnk[0];
1607 
1608     /* If free space is not available, double the total space in the list */
1609     if (current_space->local_remaining<nnz) {
1610       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
1611       nspacedouble++;
1612     }
1613 
1614     /* copy data into free space, then initialize lnk */
1615     ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
1616 
1617     current_space->array           += nnz;
1618     current_space->local_used      += nnz;
1619     current_space->local_remaining -= nnz;
1620 
1621     coi[i+1] = coi[i] + nnz;
1622   }
1623 
1624   ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
1625   ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
1626 
1627   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1628   if (afill_tmp > afill) afill = afill_tmp;
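  /* afill tracks the actual fill ratio nnz(Co)/(nnz(POt)+nnz(A_loc)) (the +1 in the denominator
     guards against division by zero); the largest ratio seen is reported against the caller's
     'fill' estimate in the PetscInfo message at the end of this routine. */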
1629 
1630   /* send j-array (coj) of Co to other processors */
1631   /*----------------------------------------------*/
1632   /* determine row ownership */
1633   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
1634   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
1635 
1636   merge->rowmap->n  = pn;
1637   merge->rowmap->bs = 1;
1638 
1639   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
1640   owners = merge->rowmap->range;
1641 
1642   /* determine the number of messages to send, their lengths */
1643   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
1644   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
1645   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
1646 
1647   len_s        = merge->len_s;
1648   merge->nsend = 0;
1649 
1650   ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
1651   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
1652 
1653   proc = 0;
1654   for (i=0; i<pon; i++) {
1655     while (prmap[i] >= owners[proc+1]) proc++;
1656     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
1657     len_s[proc] += coi[i+1] - coi[i];
1658   }
1659 
1660   len          = 0; /* max length of buf_si[] */
1661   owners_co[0] = 0;
1662   for (proc=0; proc<size; proc++) {
1663     owners_co[proc+1] = owners_co[proc] + len_si[proc];
1664     if (len_si[proc]) {
1665       merge->nsend++;
1666       len_si[proc] = 2*(len_si[proc] + 1);
1667       len         += len_si[proc];
1668     }
1669   }
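  /* Note: len_si[proc] is converted above from a row count nrows into the length of the
     i-structure message, 2*(nrows+1) = 1 (nrows) + nrows (row indices) + nrows+1 (offsets). */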
1670 
1671   /* determine the number and length of messages to receive for coi and coj  */
1672   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
1673   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
1674 
1675   /* post the Irecv and Isend of coj */
1676   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
1677   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
1678   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
1679   for (proc=0, k=0; proc<size; proc++) {
1680     if (!len_s[proc]) continue;
1681     i    = owners_co[proc];
1682     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
1683     k++;
1684   }
1685 
1686   /* receives and sends of coj are complete */
1687   ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
1688   for (i=0; i<merge->nrecv; i++) {
1689     PetscMPIInt icompleted;
1690     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
1691   }
1692   ierr = PetscFree(rwaits);CHKERRQ(ierr);
1693   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
1694 
1695   /* send and recv coi */
1696   /*-------------------*/
1697   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
1698   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
1699   ierr   = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
1700   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
1701   for (proc=0,k=0; proc<size; proc++) {
1702     if (!len_s[proc]) continue;
1703     /* form outgoing message for i-structure:
1704          buf_si[0]:                 nrows to be sent
1705                [1:nrows]:           row index (local to the receiving process)
1706                [nrows+1:2*nrows+1]: i-structure index
1707     */
1708     /*-------------------------------------------*/
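    /* Example (illustrative): sending two rows with 2 and 5 nonzeros whose stored row indices are
       3 and 7 gives buf_si = [2, 3, 7, 0, 2, 7]. */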
1709     nrows       = len_si[proc]/2 - 1;
1710     buf_si_i    = buf_si + nrows+1;
1711     buf_si[0]   = nrows;
1712     buf_si_i[0] = 0;
1713     nrows       = 0;
1714     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1715       nzi               = coi[i+1] - coi[i];
1716       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
1717       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
1718       nrows++;
1719     }
1720     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
1721     k++;
1722     buf_si += len_si[proc];
1723   }
1724   i = merge->nrecv;
1725   while (i--) {
1726     PetscMPIInt icompleted;
1727     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
1728   }
1729   ierr = PetscFree(rwaits);CHKERRQ(ierr);
1730   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
1731   ierr = PetscFree(len_si);CHKERRQ(ierr);
1732   ierr = PetscFree(len_ri);CHKERRQ(ierr);
1733   ierr = PetscFree(swaits);CHKERRQ(ierr);
1734   ierr = PetscFree(sstatus);CHKERRQ(ierr);
1735   ierr = PetscFree(buf_s);CHKERRQ(ierr);
1736 
1737   /* compute the local portion of C (mpi mat) */
1738   /*------------------------------------------*/
1739   /* allocate bi array and free space for accumulating nonzero column info */
1740   ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
1741   bi[0] = 0;
1742 
1743   /* set initial free space to be fill*(nnz(P) + nnz(A)) */
1744   nnz           = fill*(pdti[pn] + poti[pon] + ai[am]);
1745   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
1746   current_space = free_space;
1747 
1748   ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
1749   for (k=0; k<merge->nrecv; k++) {
1750     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
1751     nrows       = *buf_ri_k[k];
1752     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
1753     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1754   }
1755 
1756   ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
1757   rmax = 0;
1758   for (i=0; i<pn; i++) {
1759     /* add pdt[i,:]*AP into lnk */
1760     pnz = pdti[i+1] - pdti[i];
1761     ptJ = pdtj + pdti[i];
1762     for (j=0; j<pnz; j++) {
1763       row  = ptJ[j];  /* row of AP == col of Pt */
1764       anz  = ai[row+1] - ai[row];
1765       Jptr = aj + ai[row];
1766       /* add non-zero cols of AP into the sorted linked list lnk */
1767       ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
1768     }
1769 
1770     /* add received col data into lnk */
1771     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1772       if (i == *nextrow[k]) { /* i-th row */
1773         nzi  = *(nextci[k]+1) - *nextci[k];
1774         Jptr = buf_rj[k] + *nextci[k];
1775         ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
1776         nextrow[k]++; nextci[k]++;
1777       }
1778     }
1779     nnz = lnk[0];
1780 
1781     /* if free space is not available, make more free space */
1782     if (current_space->local_remaining<nnz) {
1783       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
1784       nspacedouble++;
1785     }
1786     /* copy data into free space, then initialize lnk */
1787     ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
1788     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
1789 
1790     current_space->array           += nnz;
1791     current_space->local_used      += nnz;
1792     current_space->local_remaining -= nnz;
1793 
1794     bi[i+1] = bi[i] + nnz;
1795     if (nnz > rmax) rmax = nnz;
1796   }
1797   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1798 
1799   ierr      = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
1800   ierr      = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
1801   afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
1802   if (afill_tmp > afill) afill = afill_tmp;
1803   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
1804   ierr = MatDestroy(&POt);CHKERRQ(ierr);
1805   ierr = MatDestroy(&PDt);CHKERRQ(ierr);
1806 
1807   /* create symbolic parallel matrix Cmpi - why can it not be assembled in the Numeric part? */
1808   /*----------------------------------------------------------------------------------*/
1809   ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1810   ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);
1811 
1812   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
1813   ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
1814   ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr);
1815   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
1816   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
1817   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
1818   ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
1819   for (i=0; i<pn; i++) {
1820     row  = i + rstart;
1821     nnz  = bi[i+1] - bi[i];
1822     Jptr = bj + bi[i];
1823     ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
1824   }
1825   ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1826   ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1827   ierr = PetscFree(vals);CHKERRQ(ierr);
1828 
1829   merge->bi        = bi;
1830   merge->bj        = bj;
1831   merge->coi       = coi;
1832   merge->coj       = coj;
1833   merge->buf_ri    = buf_ri;
1834   merge->buf_rj    = buf_rj;
1835   merge->owners_co = owners_co;
1836   merge->destroy   = Cmpi->ops->destroy;
1837   merge->duplicate = Cmpi->ops->duplicate;
1838 
1839   Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
1840   Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;
1841 
1842   /* attach the supporting struct to Cmpi for reuse */
1843   c = (Mat_MPIAIJ*)Cmpi->data;
1844 
1845   c->ptap     = ptap;
1846   ptap->api   = PETSC_NULL;
1847   ptap->apj   = PETSC_NULL;
1848   ptap->merge = merge;
1849   ptap->rmax  = rmax;
1850   ptap->apa   = PETSC_NULL;
1851 
1852   *C = Cmpi;
1853 #if defined(PETSC_USE_INFO)
1854   if (bi[pn] != 0) {
1855     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
1856     ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
1857   } else {
1858     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
1859   }
1860 #endif
1861   PetscFunctionReturn(0);
1862 }
1863