/* xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision a63bb30eff8b1dff6ddce5531d6230e04f6fd10b) */

/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
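/*
   Example usage (a minimal sketch, not part of the build): a user forms
   C = A*B through the public MatMatMult() interface, which dispatches to
   the routines in this file when A and B are MPIAIJ.  Matrix setup and
   the surrounding PetscInitialize()/PetscFinalize() are assumed.

     Mat A,B,C;
     // ... create and assemble A (m x k) and B (k x n) as MATMPIAIJ ...
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     // after changing numerical values of A or B (same nonzero pattern):
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/
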
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
/*
#define DEBUG_MATMATMULT
 */

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
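
/*
   The split above lets repeated products with an unchanged sparsity
   pattern pay the symbolic cost only once; MAT_REUSE_MATRIX skips
   straight to the numeric kernel stored in C->ops->matmultnumeric.
   A hedged sketch of such a reuse loop (UpdateValues() is a
   hypothetical application routine, not part of PETSc):

     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
     for (i=0; i<nsteps; i++) {
       ierr = UpdateValues(A);CHKERRQ(ierr); // new values, same pattern
       ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);
     }
*/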

#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult=(Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = ISDestroy(&mult->isrowa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowb);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->iscolb);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->C_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->A_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_seq);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult_32(Mat A)
{
  PetscErrorCode     ierr;
  PetscContainer     container;
  Mat_MatMatMultMPI  *mult=PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  A->ops->destroy   = mult->destroy;
  A->ops->duplicate = mult->duplicate;
  if (A->ops->destroy) {
    ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  }
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult_32(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode     ierr;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  /* Note: the container is not duplicated, because it requires deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric();
     *M is unlikely to be used that way, so we create *M in plain mpiaij format */
  ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
  (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode     ierr;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI        *ptap=a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode     ierr;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ         *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ         *cd=(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt           *adi=ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar        *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ         *p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar        *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca;
  PetscInt           cm=C->rmap->n,anz,pnz;
  Mat_PtAPMPI        *ptap=c->ptap;
  PetscInt           *api,*apj,*apJ,i,j,k,row;
  PetscInt           cstart=C->cmap->rstart;
  PetscInt           cdnz,conz,k0,k1;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt        rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultNumeric_MPIAIJ_MPIAIJ()...\n",rank);
#endif

  /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] got P_oth and P_loc...\n",rank);
#endif

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];

      /* perform dense axpy */
      valtmp = ada[j];
      for (k=0; k<pnz; k++){
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];

      /* perform dense axpy */
      valtmp = aoa[j];
      for (k=0; k<pnz; k++){
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++){
      if (apJ[k] >= cstart) break;
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++){
      ca[k1]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++){
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
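
/*
   The numeric kernel above uses a dense accumulator: row i of C is built
   in the dense work array apa (length pN = global number of columns of
   P), then gathered back into C's sparse storage using the column
   indices apj found by the symbolic phase.  A self-contained sketch of
   the same technique on plain CSR arrays (all names local to this
   comment, not PETSc API):

     // scatter-add one sparse row scaled by alpha into the dense row y[]
     for (k=0; k<pnz; k++) y[pj[k]] += alpha*pa[k];
     // gather the apnz known nonzeros back out, re-zeroing as we go
     for (k=0; k<apnz; k++) { ca[k] = y[apJ[k]]; y[apJ[k]] = 0.0; }

   Re-zeroing only the touched entries keeps the per-row cost
   proportional to the number of nonzeros rather than to pN.
*/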

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  MPI_Comm             comm=((PetscObject)A)->comm;
  Mat                  Cmpi;
  Mat_PtAPMPI          *ptap;
  PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt             *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt             *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
  PetscInt             am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscBT              lnkbt;
  PetscScalar          *apa;
  PetscReal            afill;
  PetscBool            scalable=PETSC_FALSE;
  PetscInt             nlnk_max,armax,prmax;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt          rank=a->rank;
#endif

  PetscFunctionBegin;
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend){
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);

    ierr = PetscOptionsBool("-matmatmult_32","Use the PETSc-3.2 scalable but slower C=A*B","",scalable,&scalable,PETSC_NULL);CHKERRQ(ierr);
    if (scalable){
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable_32(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
    ierr = PetscOptionsBool("-matmatmult_scalable","Use a scalable but slower C=A*B","",scalable,&scalable,PETSC_NULL);CHKERRQ(ierr);
    if (scalable){
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultSymbolic_MPIAIJ_MPIAIJ()...\n",rank);
#endif

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_oth is done...\n",rank);
#endif
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_loc is done, Annz %D * P_locnnz %D = %D...\n",rank,ad->i[am]+ao->i[am],p_loc->rmax,(ad->i[am]+ao->i[am])*p_loc->rmax);
#endif

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  armax    = ad->rmax+ao->rmax;
  prmax    = PetscMax(p_loc->rmax,p_oth->rmax);
  nlnk_max = armax*prmax;
  if (!nlnk_max || nlnk_max > pN) nlnk_max = pN;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] pN %d; nlnk_max %d; Armax %d+%d=%d; Prmax Max(%d,%d)=%d\n",rank,pN,nlnk_max,ad->rmax,ao->rmax,armax,p_loc->rmax,p_oth->rmax,prmax);
#endif
  ierr = PetscLLCondensedCreate(nlnk_max,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++){
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++){
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);
    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] AP is done...\n",rank);
#endif

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr);
  ptap->apa = apa;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Malloc apa pN %D is done...\n",rank,pN);
#endif

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<am; i++){
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
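
/*
   The symbolic phase above computes, for each row i of A, the union of
   the column-index sets of the rows of P picked out by the nonzeros of
   A[i,:]; PetscLLCondensedAddSorted() maintains that union as a sorted
   condensed linked list.  A minimal sketch of the same idea with an
   explicit flag array instead of the list (illustrative only, assumes
   flag[] of length pN is PETSC_FALSE on entry):

     cnt = 0;
     for (j=0; j<anz; j++) {                  // nonzeros A(i,row)
       row = adj[j];
       for (k=pi[row]; k<pi[row+1]; k++) {
         if (!flag[pj[k]]) { flag[pj[k]] = PETSC_TRUE; cols[cnt++] = pj[k]; }
       }
     }
     // sort cols[0..cnt), set api[i+1] = api[i] + cnt, reset used flags
*/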

/* implementation used in PETSc-3.2 */
/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable_32"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable_32(Mat A,Mat B,Mat C)
{
  PetscErrorCode     ierr;
  Mat                *seq;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);

  if (mult->skipNumeric){ /* first numeric product is done during symbolic product */
    mult->skipNumeric = PETSC_FALSE;
    PetscFunctionReturn(0);
  }
#if defined(DEBUG_MATMATMULT)
  { /* braces keep the debug declarations legal in C89 */
    PetscMPIInt rank;
    MPI_Comm    comm = ((PetscObject)C)->comm;
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    ierr = MPI_Barrier(comm);CHKERRQ(ierr);
    if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable_32()...\n",rank);
  }
#endif

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* numeric product is computed as well */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable_32"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable_32(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_MatMatMultMPI  *mult;
  PetscContainer     container;
  Mat                AB,*seq;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data;
  PetscInt           *idx,i,start,ncols,nzA,nzB,*cmap,imark;
#if defined(DEBUG_MATMATMULT)
  MPI_Comm           comm = ((PetscObject)A)->comm;
  PetscMPIInt        rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable_32()...\n",rank);
#endif
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
    SETERRQ4(((PetscObject)A)->comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }

  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* get isrowb: nonzero cols of A */
  start = A->cmap->rstart;
  cmap  = a->garray;
  nzA   = a->A->cmap->n;
  nzB   = a->B->cmap->n;
  ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  ncols = 0;
  for (i=0; i<nzB; i++) {  /* row < local row index */
    if (cmap[i] < start) idx[ncols++] = cmap[i];
    else break;
  }
  imark = i;
  for (i=0; i<nzA; i++) idx[ncols++] = start + i;   /* local rows */
  for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&mult->isrowb);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&mult->iscolb);CHKERRQ(ierr);

  /* get isrowa: all local rows of A */
  ierr = ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,&mult->isrowa);CHKERRQ(ierr);

  /* Below should go to MatMatMultNumeric_MPIAIJ_MPIAIJ() - How to generate C there? */
  /* create a seq matrix B_seq = submatrix of B by taking the rows of B that match nonzero cols of A */
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] B_seq is done...\n",rank);
#endif

  /* create a seq matrix A_seq = submatrix of A by taking all local rows of A */
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] A_loc is done...\n",rank);
#endif

  /* compute C_seq = A_seq * B_seq */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ_Scalable(mult->A_loc,mult->B_seq,fill,&mult->C_seq);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] C_seq Symbolic is done...\n",rank);
#endif
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] C_seq Numeric is done...\n",rank);
#endif

  /* create mpi matrix C by concatenating C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMergeSymbolic(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,&AB);CHKERRQ(ierr);
  ierr = MatMergeNumeric(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,AB);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Merge is done...\n",rank);
#endif

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)AB,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  mult->skipNumeric = PETSC_TRUE; /* a numeric product is done here */
  mult->destroy     = AB->ops->destroy;
  mult->duplicate   = AB->ops->duplicate;
  AB->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable_32;
  AB->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult_32;
  AB->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult_32;
  AB->ops->matmult        = MatMatMult_MPIAIJ_MPIAIJ;
  *C                      = AB;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               m=A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  (*C)->ops->matmult = MatMatMult_MPIAIJ_MPIDense;

  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
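
/*
   The PetscContainer idiom above is how the per-product work data
   survives between calls: contents is composed onto C under the key
   "workB" and recovered later with PetscObjectQuery().  A hedged sketch
   of the retrieval side (this mirrors what MatMPIDenseScatter() below
   actually does):

     PetscContainer  container;
     MPIAIJ_MPIDense *contents;
     ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
     if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
     ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);
*/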

#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices  = to->indices;
  sstarts   = to->starts;
  sprocs    = to->procs;
  swaits    = contents->swaits;
  svalues   = contents->svalues;

  rindices  = from->indices;
  rstarts   = from->starts;
  rprocs    = from->procs;
  rwaits    = contents->rwaits;
  rvalues   = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
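
/*
   The scatter above follows the standard nonblocking exchange skeleton:
   post every MPI_Irecv first, pack and MPI_Isend each outgoing message,
   then unpack receives in completion order with MPI_Waitany().  A
   stripped-down sketch of that skeleton (buffer setup assumed done;
   names local to this comment):

     for (i=0; i<nrecv; i++)
       MPI_Irecv(rbuf[i],rlen[i],MPIU_SCALAR,rproc[i],tag,comm,rwaits+i);
     for (i=0; i<nsend; i++) {
       // ... pack sbuf[i] from local data ...
       MPI_Isend(sbuf[i],slen[i],MPIU_SCALAR,sproc[i],tag,comm,swaits+i);
     }
     for (n=nrecv; n>0; n--) {
       MPI_Waitany(nrecv,rwaits,&idx,&status);
       // ... unpack rbuf[idx] ...
     }
     if (nsend) MPI_Waitall(nsend,swaits,sstatus);
*/
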
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode       ierr;
  Mat_MPIAIJ           *aij = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense         *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense         *cdense = (Mat_MPIDense*)C->data;
  Mat                  workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode     ierr;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ         *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ         *cd=(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt           *adi=ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar        *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ         *p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar        *pa_loc,*pa_oth,*pa,valtmp,*ca;
  PetscInt           cm=C->rmap->n,anz,pnz;
  Mat_PtAPMPI        *ptap=c->ptap;
  PetscScalar        *apa_sparse=ptap->apa;
  PetscInt           *api,*apj,*apJ,i,j,k,row;
  PetscInt           cstart=C->cmap->rstart;
  PetscInt           cdnz,conz,k0,k1,nextp;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt        rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable()...\n",rank);
#endif

  /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] got P_oth and P_loc...\n",rank);
#endif

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++){
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++){
      ca[k1]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++){
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
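
/*
   Unlike the non-scalable kernel, the loop above keeps the accumulator
   sparse: apa_sparse has one slot per nonzero of row i of AP, and each
   row of P is merged into it by walking two sorted index lists in step.
   A sketch of that merge (names local to this comment; correctness
   relies on pj[] being a subsequence of apJ[], which the symbolic phase
   guarantees):

     nextp = 0;
     for (k=0; nextp<pnz; k++) {
       if (apJ[k] == pj[nextp]) acc[k] += alpha*pa[nextp++];
     }

   This trades the O(pN) dense work array for O(max row nnz of AP)
   storage, which is what makes the "scalable" variant scalable in memory.
*/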

/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ(), except using LLCondensed to avoid O(BN) memory requirement */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  MPI_Comm             comm=((PetscObject)A)->comm;
  Mat                  Cmpi;
  Mat_PtAPMPI          *ptap;
  PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt             *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt             i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
  PetscInt             am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscInt             nlnk_max,armax,prmax;
  PetscReal            afill;
  PetscScalar          *apa;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt          rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable()...\n",rank);
#endif

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_oth is done...\n",rank);
#endif
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] P_loc is done, Annz %D * P_locnnz %D = %D...\n",rank,ad->i[am]+ao->i[am],p_loc->rmax,(ad->i[am]+ao->i[am])*p_loc->rmax);
#endif

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  armax    = ad->rmax+ao->rmax;
  prmax    = PetscMax(p_loc->rmax,p_oth->rmax);
  nlnk_max = armax*prmax;
  if (!nlnk_max || nlnk_max > pN) nlnk_max = pN;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] pN %d; nlnk_max %d; Armax %d+%d=%d; Prmax Max(%d,%d)=%d\n",rank,pN,nlnk_max,ad->rmax,ao->rmax,armax,p_loc->rmax,p_oth->rmax,prmax);
#endif
  ierr = PetscLLCondensedCreate_Scalable(nlnk_max,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++){
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++){
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }

    apnz     = *lnk;
    api[i+1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);
    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] AP is done..., apnz_max %d\n",rank,apnz_max);
#endif

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);

  /* malloc apa for assembling Cmpi */
  ierr = PetscMalloc(apnz_max*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,apnz_max*sizeof(PetscScalar));CHKERRQ(ierr);
  ptap->apa = apa;
  for (i=0; i<am; i++){
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

/*-------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscFunctionBegin;
  SETERRQ(((PetscObject)A)->comm,PETSC_ERR_PLIB,"Not done yet");
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  Mat                  Cmpi,A_loc;
  Mat_PtAPMPI          *ptap;
  PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ           *p=(Mat_MPIAIJ*)P->data,*c;
  Mat_SeqAIJ           *a_loc;
  PetscInt             *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt             nnz;
  PetscInt             nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt             am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,aN=A->cmap->N;
  PetscBT              lnkbt;
  MPI_Comm             comm=((PetscObject)A)->comm;
  PetscMPIInt          size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt             len,proc,*dnz,*onz,*owners;
  PetscInt             nzi,*bi,*bj;
  PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request          *swaits,*rwaits;
  MPI_Status           *sstatus,rstatus;
  Mat_Merge_SeqsToMPI  *merge;
  PetscInt             *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal            afill=1.0,afill_tmp;
  PetscInt             rstart = P->cmap->rstart,rmax;
  PetscScalar          *vals;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) printf("MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ P: %d %d; A %d %d\n",P->rmap->N,pN,A->rmap->N,A->cmap->N);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get A_loc by taking all local rows of A */
  ierr  = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  api   = a_loc->i;
  apj   = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz           = (PetscInt)(fill*(poti[pon] + api[am]));
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;
  ierr = PetscSynchronizedPrintf(comm,"[%d] nnz = fill %g *(%d + %d)\n",rank,fill,poti[pon],api[am]);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);

  /* create and initialize a linked list */
  nlnk = aN+1;
  ierr = PetscLLCreate(aN,aN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    nnz = 0;
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++){
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAddSorted(apnz,Jptr,aN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      nnz += nlnk;
    }

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLClean(aN,aN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;
    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]);
  if (afill_tmp > afill) afill = afill_tmp;

  ierr = PetscSynchronizedPrintf(comm,"[%d] local Co is done, Co_nnz %d\n",rank,coi[pon]);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;
  ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
  len_s = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++){
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len   = 0;  /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++){
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]){
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++){
    ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  /*
  ierr = PetscInfo2(A,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++){
    ierr = PetscInfo3(A,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
  }
  */
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Co is done\n",rank);
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = (PetscInt)(fill*(pdti[pn] + poti[pon] + api[am]));
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] nnz=%d + %d + %d\n",rank,pdti[pn],poti[pon],api[am]);
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);

  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    nnz = 0;
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++){
      row  = ptJ[j];  /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAddSorted(apnz,Jptr,aN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      nnz += nlnk;
    }
    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLAddSorted(nzi,Jptr,aN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
        nnz += nlnk;
        nextrow[k]++; nextci[k]++;
      }
    }

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLClean(aN,aN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;
    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Cd is done, bi[pn] %d\n",rank,bi[pn]);
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);

  ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  /* afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + api[am]); */
  afill_tmp = 1.0;
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can it not be assembled in the Numeric part - check runex55_SA? */
  /*------------------------------------------------------------------------------------------------------------*/
  /* ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr); */
  ierr = PetscMalloc((A->cmap->N+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
  ierr = PetscMemzero(vals,A->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] Cmpi %d %d\n",rank,pn,A->cmap->n);
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);

  for (i=0; i<pn; i++){
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  if (!rank) {
    printf("Cmpi\n");
    ierr = MatView(Cmpi,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  /* Cmpi->assembled      = PETSC_FALSE; */
  /* Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;  */

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap        = ptap;
  ptap->api      = api;
  ptap->apj      = apj;
  ptap->merge    = merge;
  ptap->apnz_max = ap_rmax;

  *C = Cmpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif

  PetscFunctionReturn(0);
}
1443