/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc/private/vecimpl.h>
#include <petsc/private/sfimpl.h>

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
#endif

PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
{
  Mat_Product *product = C->product;
  Mat          B       = product->B;

  PetscFunctionBegin;
  PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B));
  PetscCall(MatDestroy(&B));
  PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}
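
/*
   Illustrative sketch (an assumption about typical user code, not part of this
   file): the ABt path above is normally reached through the public MatProduct
   interface, e.g.

     Mat C;
     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_ABt));  // C = A * B^T
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));

   For MPIAIJ the ABt product is implemented by explicitly transposing B and
   reusing the AB kernel, so it costs an extra MatTranspose().
*/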

PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                 A = product->A, B = product->B;
  MatProductAlgorithm alg  = product->alg;
  PetscReal           fill = product->fill;
  PetscBool           flg;

  PetscFunctionBegin;
  /* scalable */
  PetscCall(PetscStrcmp(alg, "scalable", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* nonscalable */
  PetscCall(PetscStrcmp(alg, "nonscalable", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* seqmpi */
  PetscCall(PetscStrcmp(alg, "seqmpi", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* backend general code */
  PetscCall(PetscStrcmp(alg, "backend", &flg));
  if (flg) {
    PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscStrcmp(alg, "hypre", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
#endif
  SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported");
}
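
/*
   The algorithm string dispatched on above is set through the MatProduct
   interface; a minimal sketch (option names can differ between PETSc versions):

     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AB));
     PetscCall(MatProductSetAlgorithm(C, "scalable"));  // or "nonscalable", "seqmpi", "backend"
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
*/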

PetscErrorCode MatProductCtxDestroy_MPIAIJ_MatMatMult(PetscCtxRt data)
{
  MatProductCtx_APMPI *ptap = *(MatProductCtx_APMPI **)data;

  PetscFunctionBegin;
  PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r));
  PetscCall(PetscFree(ptap->bufa));
  PetscCall(MatDestroy(&ptap->P_loc));
  PetscCall(MatDestroy(&ptap->P_oth));
  PetscCall(MatDestroy(&ptap->Pt));
  PetscCall(PetscFree(ptap->api));
  PetscCall(PetscFree(ptap->apj));
  PetscCall(PetscFree(ptap->apa));
  PetscCall(PetscFree(ptap));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
{
  Mat_MPIAIJ          *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
  Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
  Mat_SeqAIJ          *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
  PetscScalar         *cda, *coa;
  Mat_SeqAIJ          *p_loc, *p_oth;
  PetscScalar         *apa, *ca;
  PetscInt             cm = C->rmap->n;
  MatProductCtx_APMPI *ptap;
  PetscInt            *api, *apj, *apJ, i, k;
  PetscInt             cstart = C->cmap->rstart;
  PetscInt             cdnz, conz, k0, k1;
  const PetscScalar   *dummy1, *dummy2, *dummy3, *dummy4;
  MPI_Comm             comm;
  PetscMPIInt          size;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (MatProductCtx_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

  /* flag CPU mask for C */
#if defined(PETSC_HAVE_DEVICE)
  if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
#endif

  /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /* update numerical values of P_oth and P_loc */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
  p_oth = NULL;
  if (size > 1) p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  /* trigger copy to CPU */
  PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy1));
  PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy2));
  PetscCall(MatSeqAIJGetArrayRead(ptap->P_loc, &dummy3));
  if (ptap->P_oth) PetscCall(MatSeqAIJGetArrayRead(ptap->P_oth, &dummy4));
  PetscCall(MatSeqAIJGetArrayWrite(c->A, &cda));
  PetscCall(MatSeqAIJGetArrayWrite(c->B, &coa));
  for (i = 0; i < cm; i++) {
    /* compute apa = A[i,:]*P */
    AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);

    /* set values in C */
    apJ  = PetscSafePointerPlusOffset(apj, api[i]);
    cdnz = cd->i[i + 1] - cd->i[i];
    conz = co->i[i + 1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    k  = 0;
    for (k0 = 0; k0 < conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* diagonal part of C */
    ca = PetscSafePointerPlusOffset(cda, cd->i[i]);
    for (k1 = 0; k1 < cdnz; k1++) {
      ca[k1]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* 2nd off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    for (; k0 < conz; k0++) {
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }
  }
  PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy1));
  PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy2));
  PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_loc, &dummy3));
  if (ptap->P_oth) PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_oth, &dummy4));
  PetscCall(MatSeqAIJRestoreArrayWrite(c->A, &cda));
  PetscCall(MatSeqAIJRestoreArrayWrite(c->B, &coa));

  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}
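
/*
   A self-contained sketch of the dense-accumulator technique used by
   AProw_nonscalable() above, with plain C types (illustrative only):

     // one CSR row of A times P, accumulated into a zeroed dense row apa of length N
     for (int k = ai[i]; k < ai[i + 1]; k++) {
       const int    col = aj[k];
       const double v   = aa[k];
       for (int l = pi[col]; l < pi[col + 1]; l++) apa[pj[l]] += v * pa[l];
     }
     // gather the nonzeros listed in the symbolic pattern apJ and reset apa
     for (int k = 0; k < apnz; k++) { ca[k] = apa[apJ[k]]; apa[apJ[k]] = 0.0; }

   The O(N) dense row is what makes this variant "nonscalable" in memory; the
   scalable variant further below replaces it with a compressed accumulator.
*/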

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm             comm;
  PetscMPIInt          size;
  MatProductCtx_APMPI *ptap;
  PetscFreeSpaceList   free_space = NULL, current_space = NULL;
  Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
  PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
  PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
  PetscInt            *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
  PetscInt             am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
  PetscBT              lnkbt;
  PetscReal            afill;
  MatType              mtype;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* create struct MatProductCtx_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  PetscCall(PetscMalloc1(am + 1, &api));
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
  current_space = free_space;

  MatPreallocateBegin(comm, am, pn, dnz, onz);
  for (i = 0; i < am; i++) {
    /* diagonal portion of A */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row + 1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
    }
    /* off-diagonal portion of A */
    nzi = aoi[i + 1] - aoi[i];
    for (j = 0; j < nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row + 1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
    }
    /* add possible missing diagonal entry */
    if (C->force_diagonals) {
      j = i + rstart; /* column index */
      PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt));
    }

    apnz       = lnk[0];
    api[i + 1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt));
    PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

    current_space->array += apnz;
    current_space->local_used += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  PetscCall(PetscMalloc1(api[am], &ptap->apj));
  apj = ptap->apj;
  PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
  PetscCall(PetscLLDestroy(lnk, lnkbt));

  /* malloc apa to store dense row A[i,:]*P */
  PetscCall(PetscCalloc1(pN, &ptap->apa));

  /* set and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));

  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
333     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
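
/*
   A minimal sketch, using the public MatGetInfo() interface, of how the fill
   statistics recorded above can be inspected after the product:

     MatInfo info;
     PetscCall(MatGetInfo(C, MAT_LOCAL, &info));
     PetscCall(PetscPrintf(PETSC_COMM_SELF, "mallocs %g, fill given %g, needed %g\n",
                           info.mallocs, info.fill_ratio_given, info.fill_ratio_needed));

   Passing the reported fill_ratio_needed as the fill argument of a later
   symbolic call avoids the reallocations counted in nspacedouble.
*/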

static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);

  C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
  C->ops->productsymbolic = MatProductSymbolic_AB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;

  PetscFunctionBegin;
  if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

  C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
  C->ops->productsymbolic          = MatProductSymbolic_AtB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
{
  Mat_Product *product = C->product;

  PetscFunctionBegin;
  switch (product->type) {
  case MATPRODUCT_AB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
    break;
  case MATPRODUCT_AtB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
    break;
  default:
    break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

typedef struct {
  Mat           workB, workB1;   /* sequential work matrices holding the needed off-process rows of B */
  MPI_Request  *rwaits, *swaits; /* receive and send requests for the row scatter */
  PetscInt      nsends, nrecvs;
  MPI_Datatype *stype, *rtype;   /* derived datatypes, one per destination/source process */
  PetscInt      blda;            /* leading dimension of B's local array when the types were built */
} MPIAIJ_MPIDense;

static PetscErrorCode MatMPIAIJ_MPIDenseDestroy(PetscCtxRt ctx)
{
  MPIAIJ_MPIDense *contents = *(MPIAIJ_MPIDense **)ctx;
  PetscInt         i;

  PetscFunctionBegin;
  PetscCall(MatDestroy(&contents->workB));
  PetscCall(MatDestroy(&contents->workB1));
  for (i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i]));
  for (i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
  PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits));
  PetscCall(PetscFree(contents));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
{
  Mat_MPIAIJ      *aij = (Mat_MPIAIJ *)A->data;
  PetscInt         nz  = aij->B->cmap->n, blda, m, M, n, N;
  MPIAIJ_MPIDense *contents;
  VecScatter       ctx = aij->Mvctx;
  PetscInt         Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, numBb;
  MPI_Comm         comm;
  MPI_Datatype     type1, *stype, *rtype;
  const PetscInt  *sindices, *sstarts, *rstarts;
  PetscMPIInt     *disp, nsends, nrecvs, nrows_to, nrows_from;
  PetscBool        cisdense;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense));
  if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name));
  PetscCall(MatGetLocalSize(C, &m, &n));
  PetscCall(MatGetSize(C, &M, &N));
  if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN));
  PetscCall(MatSetBlockSizesFromMats(C, A, B));
  PetscCall(MatSetUp(C));
  PetscCall(MatDenseGetLDA(B, &blda));
  PetscCall(PetscNew(&contents));

  PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
  PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));

  /* Create column block of B and C for memory scalability when BN is too large */
  /* Estimate Bbn, column size of Bb */
  if (nz) {
    Bbn1 = 2 * Am * BN / nz;
    if (!Bbn1) Bbn1 = 1;
  } else Bbn1 = BN;

  bs   = B->cmap->bs;
  Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
  if (Bbn1 > BN) Bbn1 = BN;
  PetscCallMPI(MPIU_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm));

  /* Enable runtime option for Bbn */
  PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatProduct", "Mat");
  PetscCall(PetscOptionsDeprecated("-matmatmult_Bbn", "-matproduct_batch_size", "3.25", NULL));
  PetscCall(PetscOptionsInt("-matproduct_batch_size", "Number of columns in Bb", "MatProduct", Bbn, &Bbn, NULL));
  PetscOptionsEnd();
  Bbn = PetscMin(Bbn, BN);

  if (Bbn > 0 && Bbn < BN) {
    numBb = BN / Bbn;
    Bbn1  = BN - numBb * Bbn;
  } else numBb = 0;

  if (numBb) {
    PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb));
    if (Bbn1) { /* Create workB1 for the remaining columns */
      PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1));
      /* Create work matrix used to store off processor rows of B needed for local product */
      PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1));
    } else contents->workB1 = NULL;
  }

  /* Create work matrix used to store off processor rows of B needed for local product */
  PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB));

  /* Use MPI derived data type to reduce memory required by the send/recv buffers */
  PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits));
  contents->stype  = stype;
  contents->nsends = nsends;

  contents->rtype  = rtype;
  contents->nrecvs = nrecvs;
  contents->blda   = blda;

  PetscCall(PetscMalloc1(Bm + 1, &disp));
  for (PetscMPIInt i = 0; i < nsends; i++) {
    PetscCall(PetscMPIIntCast(sstarts[i + 1] - sstarts[i], &nrows_to));
    for (PetscInt j = 0; j < nrows_to; j++) PetscCall(PetscMPIIntCast(sindices[sstarts[i] + j], &disp[j])); /* rowB to be sent */
    PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1));
    PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]));
    PetscCallMPI(MPI_Type_commit(&stype[i]));
    PetscCallMPI(MPI_Type_free(&type1));
  }

  for (PetscMPIInt i = 0; i < nrecvs; i++) {
    /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
    PetscCall(PetscMPIIntCast(rstarts[i + 1] - rstarts[i], &nrows_from));
    disp[0] = 0;
    PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1));
    PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]));
    PetscCallMPI(MPI_Type_commit(&rtype[i]));
    PetscCallMPI(MPI_Type_free(&type1));
  }

  PetscCall(PetscFree(disp));
  PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
  PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
  PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));

  C->product->data       = contents;
  C->product->destroy    = MatMPIAIJ_MPIDenseDestroy;
  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
  PetscFunctionReturn(PETSC_SUCCESS);
}
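
/*
   The datatypes built above let a column-major dense block be communicated
   without packing: an indexed block picks the wanted rows of one column, and
   resizing its extent to lda*sizeof(scalar) makes count = ncols step across
   columns. A self-contained sketch with plain MPI types (illustrative only;
   disp holds the row indices to send):

     MPI_Datatype onecol, rows;
     MPI_Type_create_indexed_block(nrows, 1, disp, MPI_DOUBLE, &onecol);          // selected rows of one column
     MPI_Type_create_resized(onecol, 0, (MPI_Aint)(lda * sizeof(double)), &rows); // stride to next column
     MPI_Type_commit(&rows);
     MPI_Type_free(&onecol);
     MPI_Send(b, ncols, rows, dest, tag, comm); // nrows x ncols entries, no intermediate buffer
*/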

PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);

/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.

    Input: If Bbidx = 0, uses B = Bb, else B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
*/

static PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
{
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
  const PetscScalar *b;
  PetscScalar       *rvalues;
  VecScatter         ctx = aij->Mvctx;
  const PetscInt    *sindices, *sstarts, *rstarts;
  const PetscMPIInt *sprocs, *rprocs;
  PetscMPIInt        nsends, nrecvs;
  MPI_Request       *swaits, *rwaits;
  MPI_Comm           comm;
  PetscMPIInt        tag = ((PetscObject)ctx)->tag, ncols, nrows, nsends_mpi, nrecvs_mpi;
  MPIAIJ_MPIDense   *contents;
  Mat                workB;
  MPI_Datatype      *stype, *rtype;
  PetscInt           blda;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
  PetscCall(PetscMPIIntCast(B->cmap->N, &ncols));
  PetscCall(PetscMPIIntCast(aij->B->cmap->n, &nrows));
  contents = (MPIAIJ_MPIDense *)C->product->data;
  PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/));
  PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/));
  PetscCall(PetscMPIIntCast(nsends, &nsends_mpi));
  PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi));
  if (Bbidx == 0) workB = *outworkB = contents->workB;
  else workB = *outworkB = contents->workB1;
  PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->rmap->n, nrows);
  swaits = contents->swaits;
  rwaits = contents->rwaits;

  PetscCall(MatDenseGetArrayRead(B, &b));
  PetscCall(MatDenseGetLDA(B, &blda));
  PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda);
  PetscCall(MatDenseGetArray(workB, &rvalues));

  /* Post recv, use MPI derived data type to save memory */
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  rtype = contents->rtype;
  for (PetscMPIInt i = 0; i < nrecvs; i++) PetscCallMPI(MPIU_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i));

  stype = contents->stype;
  for (PetscMPIInt i = 0; i < nsends; i++) PetscCallMPI(MPIU_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i));

  if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE));
  if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE));

  PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL));
  PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL));
  PetscCall(MatDenseRestoreArrayRead(B, &b));
  PetscCall(MatDenseRestoreArray(workB, &rvalues));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
{
  Mat_MPIAIJ      *aij    = (Mat_MPIAIJ *)A->data;
  Mat_MPIDense    *bdense = (Mat_MPIDense *)B->data;
  Mat_MPIDense    *cdense = (Mat_MPIDense *)C->data;
  Mat              workB;
  MPIAIJ_MPIDense *contents;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
  contents = (MPIAIJ_MPIDense *)C->product->data;
  /* diagonal block of A times all local rows of B */
  /* TODO: this calls a symbolic multiplication every time, which could be avoided */
  PetscCall(MatMatMult(aij->A, bdense->A, MAT_REUSE_MATRIX, PETSC_CURRENT, &cdense->A));
  if (contents->workB->cmap->n == B->cmap->N) {
    /* get off processor parts of B needed to complete C=A*B */
    PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB));

    /* off-diagonal block of A times nonlocal rows of B */
    PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
  } else {
    Mat       Bb, Cb;
    PetscInt  BN = B->cmap->N, n = contents->workB->cmap->n;
    PetscBool ccpu;

    PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n);
    /* Prevent unneeded copies back and forth to the GPU
       when getting and restoring the submatrix;
       we still need proper GPU code for parallel AIJ * dense */
    PetscCall(MatBoundToCPU(C, &ccpu));
    PetscCall(MatBindToCPU(C, PETSC_TRUE));
    for (PetscInt i = 0; i < BN; i += n) {
      PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
      PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb));

      /* get off processor parts of B needed to complete C=A*B */
      PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB));

      /* off-diagonal block of A times nonlocal rows of B */
      cdense = (Mat_MPIDense *)Cb->data;
      PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
      PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
      PetscCall(MatDenseRestoreSubMatrix(C, &Cb));
    }
    PetscCall(MatBindToCPU(C, ccpu));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
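
/*
   A minimal sketch of the column-batching pattern used above, for a dense
   matrix X with N columns processed in windows of width n (illustrative only):

     for (PetscInt i = 0; i < N; i += n) {
       Mat Xb;
       PetscCall(MatDenseGetSubMatrix(X, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, N), &Xb));
       // ... operate on the column block Xb in place ...
       PetscCall(MatDenseRestoreSubMatrix(X, &Xb));
     }

   The submatrix aliases the storage of X, so no columns are copied; binding C
   to the CPU around the loop (as above) avoids GPU round trips per block.
*/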

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
{
  Mat_MPIAIJ          *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
  Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
  Mat_SeqAIJ          *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
  PetscInt            *adi = ad->i, *adj, *aoi = ao->i, *aoj;
  PetscScalar         *ada, *aoa, *cda = cd->a, *coa = co->a;
  Mat_SeqAIJ          *p_loc, *p_oth;
  PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
  PetscScalar         *pa_loc, *pa_oth, *pa, valtmp, *ca;
  PetscInt             cm = C->rmap->n, anz, pnz;
  MatProductCtx_APMPI *ptap;
  PetscScalar         *apa_sparse;
  const PetscScalar   *dummy;
  PetscInt            *api, *apj, *apJ, i, j, k, row;
  PetscInt             cstart = C->cmap->rstart;
  PetscInt             cdnz, conz, k0, k1, nextp;
  MPI_Comm             comm;
  PetscMPIInt          size;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (MatProductCtx_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

  /* flag CPU mask for C */
#if defined(PETSC_HAVE_DEVICE)
  if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
#endif
  apa_sparse = ptap->apa;

  /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /* update numerical values of P_oth and P_loc */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  pa_loc = p_loc->a;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
    pa_oth = p_oth->a;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
    pa_oth = NULL;
  }

  /* trigger copy to CPU */
  PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
  PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
  api = ptap->api;
  apj = ptap->apj;
  for (i = 0; i < cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i + 1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j = 0; j < anz; j++) {
      row = adj[j];
      pnz = pi_loc[row + 1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k = 0; nextp < pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp * pa[nextp++];
        }
      }
      PetscCall(PetscLogFlops(2.0 * pnz));
    }

    /* off-diagonal portion of A */
    anz = aoi[i + 1] - aoi[i];
    aoj = PetscSafePointerPlusOffset(ao->j, aoi[i]);
    aoa = PetscSafePointerPlusOffset(ao->a, aoi[i]);
    for (j = 0; j < anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row + 1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k = 0; nextp < pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp * pa[nextp++];
        }
      }
      PetscCall(PetscLogFlops(2.0 * pnz));
    }

    /* set values in C */
    cdnz = cd->i[i + 1] - cd->i[i];
    conz = co->i[i + 1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    k  = 0;
    for (k0 = 0; k0 < conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1 = 0; k1 < cdnz; k1++) {
      ca[k1]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    for (; k0 < conz; k0++) {
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}
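
/*
   The "sparse axpy" loops above rely on an invariant from the symbolic phase:
   the row pattern pj of P is a sorted subset of the symbolic pattern apJ of
   row i of AP. A plain C sketch of the matching scan (illustrative only):

     // apJ: sorted superset pattern; pj/pa: sorted sparse row; v: scalar multiplier
     int nextp = 0;
     for (int k = 0; nextp < pnz; k++) {
       if (apJ[k] == pj[nextp]) apa_sparse[k] += v * pa[nextp++];
     }

   Because every pj entry occurs in apJ, the scan terminates before k runs off
   the end of apJ, and each row costs O(apnz) instead of O(P->cmap->N).
*/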

/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except it uses a scalable condensed linked list to avoid the O(P->cmap->N) dense work array */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm             comm;
  PetscMPIInt          size;
  MatProductCtx_APMPI *ptap;
  PetscFreeSpaceList   free_space = NULL, current_space = NULL;
  Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
  PetscInt            *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
  PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
  PetscInt             i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
  PetscInt             am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
  PetscReal            afill;
  MatType              mtype;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* create struct MatProductCtx_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  PetscCall(PetscMalloc1(am + 1, &api));
  ptap->api = api;
  api[0]    = 0;

  PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
  current_space = free_space;
  MatPreallocateBegin(comm, am, pn, dnz, onz);
  for (i = 0; i < am; i++) {
    /* diagonal portion of A */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row + 1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }
    /* off-diagonal portion of A */
    nzi = aoi[i + 1] - aoi[i];
    for (j = 0; j < nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row + 1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }

    /* add missing diagonal entry */
    if (C->force_diagonals) {
      j = i + rstart; /* column index */
      PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
    }

    apnz       = *lnk;
    api[i + 1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
    PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

    current_space->array += apnz;
    current_space->local_used += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  PetscCall(PetscMalloc1(api[am], &ptap->apj));
  apj = ptap->apj;
  PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
  PetscCall(PetscLLCondensedDestroy_Scalable(lnk));

  /* create and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  /* malloc apa for assembling C */
  PetscCall(PetscCalloc1(apnz_max, &ptap->apa));

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
937     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This function is needed for the seqMPI matrix-matrix multiplication.     */
/* Three sorted input arrays are merged into one sorted output array; the   */
/* number of entries written to out is returned in size4. Duplicate entries */
/* appear only once in the output.                                          */
static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
{
  PetscInt i = 0, j = 0, k = 0, l = 0; /* PetscInt (not int) so the counters match the index type when 64-bit indices are enabled */

  /* Traverse all three arrays */
  while (i < size1 && j < size2 && k < size3) {
    if (in1[i] < in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i++];
    } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
      out[l++] = in2[j++];
    } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
      out[l++] = in3[k++];
    } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i];
      i++, j++;
    } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
      out[l++] = in1[i];
      i++, k++;
    } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
      out[l++] = in2[j];
      k++, j++;
    } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
      out[l++] = in1[i];
      i++, j++, k++;
    }
  }

  /* Traverse two remaining arrays */
  while (i < size1 && j < size2) {
    if (in1[i] < in2[j]) {
      out[l++] = in1[i++];
    } else if (in1[i] > in2[j]) {
      out[l++] = in2[j++];
    } else {
      out[l++] = in1[i];
      i++, j++;
    }
  }

  while (i < size1 && k < size3) {
    if (in1[i] < in3[k]) {
      out[l++] = in1[i++];
    } else if (in1[i] > in3[k]) {
      out[l++] = in3[k++];
    } else {
      out[l++] = in1[i];
      i++, k++;
    }
  }

  while (k < size3 && j < size2) {
    if (in3[k] < in2[j]) {
      out[l++] = in3[k++];
    } else if (in3[k] > in2[j]) {
      out[l++] = in2[j++];
    } else {
      out[l++] = in3[k];
      k++, j++;
    }
  }

  /* Traverse one remaining array */
  while (i < size1) out[l++] = in1[i++];
  while (j < size2) out[l++] = in2[j++];
  while (k < size3) out[l++] = in3[k++];

  *size4 = l;
}
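
/*
   A small illustrative check of Merge3SortedArrays() (hypothetical inputs,
   not compiled in):

     PetscInt in1[] = {1, 4, 7}, in2[] = {1, 2, 7}, in3[] = {3, 4, 9};
     PetscInt out[9], n;
     Merge3SortedArrays(3, in1, 3, in2, 3, in3, &n, out);
     // out = {1, 2, 3, 4, 7, 9}, n = 6: duplicates across inputs appear once
*/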

/* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and  */
/* adds up the products. Two of these three multiplications are performed with existing (sequential)      */
/* matrix-matrix multiplications.  */
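/* With the usual MPIAIJ splitting A = [A_diag | A_off] and the local rows of P
   split by column ownership into P_diag and P_off, the local part of the product is

       C_loc = A_diag*P_diag + A_diag*P_off + A_off*P_oth

   The first term is computed with the existing sequential product (adpd below),
   the third with the sequential product applied to the gathered rows P_oth
   (aopoth), and only the middle term needs the linked-list merge. */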
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm             comm;
  PetscMPIInt          size;
  MatProductCtx_APMPI *ptap;
  PetscFreeSpaceList   free_space_diag = NULL, current_space = NULL;
  Mat_MPIAIJ          *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ          *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc;
  Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data;
  Mat_SeqAIJ          *adpd_seq, *p_off, *aopoth_seq;
  PetscInt             adponz, adpdnz;
  PetscInt            *pi_loc, *dnz, *onz;
  PetscInt            *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
  PetscInt            *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
  PetscInt             am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
  PetscBT              lnkbt;
  PetscReal            afill;
  PetscMPIInt          rank;
  Mat                  adpd, aopoth;
  MatType              mtype;
  const char          *prefix;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));

  /* create struct MatProductCtx_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;

  /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
  PetscCall(PetscMalloc1(am + 1, &api));
  PetscCall(PetscMalloc1(am + 1, &adpoi));

  adpoi[0]  = 0;
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
  PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
  MatPreallocateBegin(comm, am, pn, dnz, onz);

  /* Symbolic calc of A_loc_diag * P_loc_diag */
  PetscCall(MatGetOptionsPrefix(A, &prefix));
  PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
  PetscCall(MatSetOptionsPrefix(adpd, prefix));
  PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));

  PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
  PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
  PetscCall(MatProductSetFill(adpd, fill));
  PetscCall(MatProductSetFromOptions(adpd));

  adpd->force_diagonals = C->force_diagonals;
  PetscCall(MatProductSymbolic(adpd));

  adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
  adpdi    = adpd_seq->i;
  adpdj    = adpd_seq->j;
  p_off    = (Mat_SeqAIJ *)p->B->data;
  poff_i   = p_off->i;
  poff_j   = p_off->j;

  /* j_temp stores indices of a result row before they are added to the linked list */
  PetscCall(PetscMalloc1(pN, &j_temp));

  /* Symbolic calc of the A_diag * p_loc_off */
  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
  current_space = free_space_diag;

  for (i = 0; i < am; i++) {
    /* A_diag * P_loc_off */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = poff_i[row + 1] - poff_i[row];
      Jptr = poff_j + poff_i[row];
      for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
    }

    adponz       = lnk[0];
    adpoi[i + 1] = adpoi[i] + adponz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < adponz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));

    current_space->array += adponz;
    current_space->local_used += adponz;
    current_space->local_remaining -= adponz;
  }

  /* Symbolic calc of A_off * P_oth */
  PetscCall(MatSetOptionsPrefix(a->B, prefix));
  PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
  PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
  PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
  aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
  aopothi    = aopoth_seq->i;
  aopothj    = aopoth_seq->j;

  /* Allocate space for apj, adpj, aopj, ... */
  /* destroy lists of free space and other temporary array(s) */

  PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am], &ptap->apj));
  PetscCall(PetscMalloc1(adpoi[am], &adpoj));

  /* Copy from linked list to j-array */
  PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
  PetscCall(PetscLLDestroy(lnk, lnkbt));

  adpoJ = adpoj;
  adpdJ = adpdj;
  aopJ  = aopothj;
  apj   = ptap->apj;
  apJ   = apj; /* still empty */

  /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
  /* A_diag * P_loc_diag to get A*P */
  for (i = 0; i < am; i++) {
    aopnz  = aopothi[i + 1] - aopothi[i];
    adponz = adpoi[i + 1] - adpoi[i];
    adpdnz = adpdi[i + 1] - adpdi[i];

    /* Correct indices from A_diag*P_diag */
    for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
    /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
    Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
    PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));

    aopJ += aopnz;
    adpoJ += adponz;
    adpdJ += adpdnz;
    apJ += apnz;
    api[i + 1] = api[i] + apnz;
  }

  /* malloc apa to store dense row A[i,:]*P */
  PetscCall(PetscCalloc1(pN, &ptap->apa));

  /* create and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatProductCtxDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
1212     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif

  PetscCall(MatDestroy(&aopoth));
  PetscCall(MatDestroy(&adpd));
  PetscCall(PetscFree(j_temp));
  PetscCall(PetscFree(adpoj));
  PetscCall(PetscFree(adpoi));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This routine only works when scall=MAT_REUSE_MATRIX! */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
{
  MatProductCtx_APMPI *ptap;
  Mat                  Pt;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (MatProductCtx_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
  PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");

  Pt = ptap->Pt;
  PetscCall(MatTransposeSetPrecursor(P, Pt));
  PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
  PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C));
  PetscFunctionReturn(PETSC_SUCCESS);
}
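
/*
   User-level entry point for the routine above (a sketch): the first call with
   MAT_INITIAL_MATRIX goes through the symbolic path, later calls reuse C and
   land in the numeric routine, which requires the cached Pt.

     Mat C;
     PetscCall(MatTransposeMatMult(P, A, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C));  // C = P^T * A
     // ... update values of P and/or A, same nonzero patterns ...
     PetscCall(MatTransposeMatMult(P, A, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C));
*/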

/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
{
  MatProductCtx_APMPI     *ptap;
  Mat_MPIAIJ              *p = (Mat_MPIAIJ *)P->data;
  MPI_Comm                 comm;
  PetscMPIInt              size, rank;
  PetscFreeSpaceList       free_space = NULL, current_space = NULL;
  PetscInt                 pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
  PetscInt                *lnk, i, k, rstart;
  PetscBT                  lnkbt;
  PetscMPIInt              tagi, tagj, *len_si, *len_s, *len_ri, nrecv, proc, nsend;
  PETSC_UNUSED PetscMPIInt icompleted = 0;
  PetscInt               **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
  PetscInt                 len, *dnz, *onz, *owners, nzi;
  PetscInt                 nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
  MPI_Request             *swaits, *rwaits;
  MPI_Status              *sstatus, rstatus;
  PetscLayout              rowmap;
  PetscInt                *owners_co, *coi, *coj; /* i and j arrays of C_oth = (p->B)^T*A - used in the communication */
  PetscMPIInt             *len_r, *id_r;          /* lengths and source ranks of the messages to be received */
  PetscInt                *Jptr, *prmap = p->garray, con, j, Crmax;
  Mat_SeqAIJ              *a_loc, *c_loc, *c_oth;
  PetscHMapI               ta;
  MatType                  mtype;
  const char              *prefix;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* create symbolic parallel matrix C */
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));

  C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

  /* create struct MatProductCtx_APMPI and attach it to C later */
1284   PetscCall(PetscNew(&ptap));
1285 
1286   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1287   PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
1288   PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));
1289 
1290   /* (1) compute symbolic A_loc */
1291   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));
1292 
1293   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1294   PetscCall(MatGetOptionsPrefix(A, &prefix));
1295   PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
1296   PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
1297   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
1298   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));
1299 
1300   /* (3) send coj of C_oth to other processors  */
1301   /* determine row ownership */
1302   PetscCall(PetscLayoutCreate(comm, &rowmap));
1303   rowmap->n  = pn;
1304   rowmap->bs = 1;
1305   PetscCall(PetscLayoutSetUp(rowmap));
1306   owners = rowmap->range;
1307 
1308   /* determine the number of messages to send, their lengths */
1309   PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 1, &owners_co));
1310   PetscCall(PetscArrayzero(len_s, size));
1311   PetscCall(PetscArrayzero(len_si, size));
1312 
1313   c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1314   coi   = c_oth->i;
1315   coj   = c_oth->j;
1316   con   = ptap->C_oth->rmap->n;
1317   proc  = 0;
1318   for (i = 0; i < con; i++) {
1319     while (prmap[i] >= owners[proc + 1]) proc++;
1320     len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
1321     len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1322   }
1323 
1324   len          = 0; /* total length of buf_s[], see (4) */
1325   owners_co[0] = 0;
1326   nsend        = 0;
1327   for (proc = 0; proc < size; proc++) {
1328     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1329     if (len_s[proc]) {
1330       nsend++;
1331       len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1332       len += len_si[proc];
1333     }
1334   }
1335 
1336   /* determine the number and length of messages to receive for coi and coj  */
1337   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
1338   PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));
1339 
1340   /* post the Irecv and Isend of coj */
1341   PetscCall(PetscCommGetNewTag(comm, &tagj));
1342   PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
1343   PetscCall(PetscMalloc1(nsend, &swaits));
1344   for (proc = 0, k = 0; proc < size; proc++) {
1345     if (!len_s[proc]) continue;
1346     i = owners_co[proc];
1347     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1348     k++;
1349   }
1350 
1351   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1352   PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
1353   PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
1354   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
1355   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
1356   c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;
1357 
1358   /* wait until receives of coj are complete */
1359   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1360   PetscCall(PetscFree(rwaits));
1361   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1362 
1363   /* add received column indices into ta to update Crmax */
1364   a_loc = (Mat_SeqAIJ *)ptap->A_loc->data;
1365 
1366   /* create and initialize a hash table, used to compute Crmax, an upper bound on the number of nonzeros in any row of C */
1367   PetscCall(PetscHMapICreateWithSize(an, &ta));
1368   MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);
1369 
1370   for (k = 0; k < nrecv; k++) { /* k-th received message */
1371     Jptr = buf_rj[k];
1372     for (j = 0; j < len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1373   }
1374   PetscCall(PetscHMapIGetSize(ta, &Crmax));
1375   PetscCall(PetscHMapIDestroy(&ta));
1376 
1377   /* (4) send and recv coi */
1378   PetscCall(PetscCommGetNewTag(comm, &tagi));
1379   PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
1380   PetscCall(PetscMalloc1(len, &buf_s));
1381   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1382   for (proc = 0, k = 0; proc < size; proc++) {
1383     if (!len_s[proc]) continue;
1384     /* form outgoing message for i-structure:
1385          buf_si[0]:                 nrows to be sent
1386                [1:nrows]:           row index (global)
1387                [nrows+1:2*nrows+1]: i-structure index
1388     */
1389     nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1390     buf_si_i    = buf_si + nrows + 1;
1391     buf_si[0]   = nrows;
1392     buf_si_i[0] = 0;
1393     nrows       = 0;
1394     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1395       nzi                 = coi[i + 1] - coi[i];
1396       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1397       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1398       nrows++;
1399     }
1400     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1401     k++;
1402     buf_si += len_si[proc];
1403   }
1404   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1405   PetscCall(PetscFree(rwaits));
1406   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1407 
1408   PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
1409   PetscCall(PetscFree(len_ri));
1410   PetscCall(PetscFree(swaits));
1411   PetscCall(PetscFree(buf_s));
1412 
1413   /* (5) compute the local portion of C      */
1414   /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1415   PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
1416   current_space = free_space;
1417 
1418   PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
1419   for (k = 0; k < nrecv; k++) {
1420     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1421     nrows       = *buf_ri_k[k];
1422     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1423     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1424   }
1425 
1426   MatPreallocateBegin(comm, pn, an, dnz, onz);
1427   PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
1428   for (i = 0; i < pn; i++) { /* for each local row of C */
1429     /* add C_loc into C */
1430     nzi  = c_loc->i[i + 1] - c_loc->i[i];
1431     Jptr = c_loc->j + c_loc->i[i];
1432     PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1433 
1434     /* add received col data into lnk */
1435     for (k = 0; k < nrecv; k++) { /* k-th received message */
1436       if (i == *nextrow[k]) {     /* i-th row */
1437         nzi  = *(nextci[k] + 1) - *nextci[k];
1438         Jptr = buf_rj[k] + *nextci[k];
1439         PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1440         nextrow[k]++;
1441         nextci[k]++;
1442       }
1443     }
1444 
1445     /* add missing diagonal entry */
1446     if (C->force_diagonals) {
1447       k = i + owners[rank]; /* column index */
1448       PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
1449     }
1450 
1451     nzi = lnk[0];
1452 
1453     /* copy data into free space, then initialize lnk */
1454     PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
1455     PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
1456   }
1457   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1458   PetscCall(PetscLLDestroy(lnk, lnkbt));
1459   PetscCall(PetscFreeSpaceDestroy(free_space));
1460 
1461   /* local sizes and preallocation */
1462   PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
1463   PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
1464   PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
1465   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1466   MatPreallocateEnd(dnz, onz);
1467 
1468   /* add C_loc and C_oth to C */
1469   PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
1470   for (i = 0; i < pn; i++) {
1471     ncols = c_loc->i[i + 1] - c_loc->i[i];
1472     cols  = c_loc->j + c_loc->i[i];
1473     row   = rstart + i;
1474     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1475 
1476     if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
1477   }
1478   for (i = 0; i < con; i++) {
1479     ncols = c_oth->i[i + 1] - c_oth->i[i];
1480     cols  = c_oth->j + c_oth->i[i];
1481     row   = prmap[i];
1482     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1483   }
1484   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1485   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1486   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1487 
1488   /* free communication buffers */
1489   PetscCall(PetscFree(id_r));
1490   PetscCall(PetscFree(len_r));
1491   PetscCall(PetscFree(buf_ri[0]));
1492   PetscCall(PetscFree(buf_ri));
1493   PetscCall(PetscFree(buf_rj[0]));
1494   PetscCall(PetscFree(buf_rj));
1495   PetscCall(PetscLayoutDestroy(&rowmap));
1496 
1497   /* attach the supporting struct to C for reuse */
1498   C->product->data    = ptap;
1499   C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
1500   PetscFunctionReturn(PETSC_SUCCESS);
1501 }
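
/*
   Selection sketch (hedged): the symbolic routine above is the "nonscalable" branch of the
   AtB product and is normally reached through the MatProduct interface rather than called
   directly. Assuming assembled MPIAIJ matrices P and A:

     Mat C;
     PetscCall(MatProductCreate(P, A, NULL, &C));          // C = Pt*A
     PetscCall(MatProductSetType(C, MATPRODUCT_AtB));
     PetscCall(MatProductSetAlgorithm(C, "nonscalable"));
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));                     // dispatches to this routine
     PetscCall(MatProductNumeric(C));
     PetscCall(MatDestroy(&C));
*/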
1502 
1503 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1504 {
1505   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data;
1506   Mat_SeqAIJ          *c_seq;
1507   MatProductCtx_APMPI *ptap;
1508   Mat                  A_loc, C_loc, C_oth;
1509   PetscInt             i, rstart, rend, cm, ncols, row;
1510   const PetscInt      *cols;
1511   const PetscScalar   *vals;
1512 
1513   PetscFunctionBegin;
1514   MatCheckProduct(C, 3);
1515   ptap = (MatProductCtx_APMPI *)C->product->data;
1516   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1517   PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1518   PetscCall(MatZeroEntries(C));
1519 
1520   /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1521   /* 1) get R = Pd^T, Ro = Po^T */
1522   PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
1523   PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
1524   PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
1525   PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));
1526 
1527   /* 2) compute numeric A_loc */
1528   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));
1529 
1530   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1531   A_loc = ptap->A_loc;
1532   PetscCall(ptap->C_loc->ops->matmultnumeric(ptap->Rd, A_loc, ptap->C_loc));
1533   PetscCall(ptap->C_oth->ops->matmultnumeric(ptap->Ro, A_loc, ptap->C_oth));
1534   C_loc = ptap->C_loc;
1535   C_oth = ptap->C_oth;
1536 
1537   /* add C_loc and C_oth to C */
1538   PetscCall(MatGetOwnershipRange(C, &rstart, &rend));
1539 
1540   /* C_loc -> C */
1541   cm    = C_loc->rmap->N;
1542   c_seq = (Mat_SeqAIJ *)C_loc->data;
1543   cols  = c_seq->j;
1544   vals  = c_seq->a;
1545   for (i = 0; i < cm; i++) {
1546     ncols = c_seq->i[i + 1] - c_seq->i[i];
1547     row   = rstart + i;
1548     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1549     cols += ncols;
1550     vals += ncols;
1551   }
1552 
1553   /* Co -> C, off-processor part */
1554   cm    = C_oth->rmap->N;
1555   c_seq = (Mat_SeqAIJ *)C_oth->data;
1556   cols  = c_seq->j;
1557   vals  = c_seq->a;
1558   for (i = 0; i < cm; i++) {
1559     ncols = c_seq->i[i + 1] - c_seq->i[i];
1560     row   = p->garray[i];
1561     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1562     cols += ncols;
1563     vals += ncols;
1564   }
1565   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1566   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1567   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1568   PetscFunctionReturn(PETSC_SUCCESS);
1569 }
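
/*
   Pattern sketch (illustrative only; the helper below is hypothetical, not part of PETSc):
   both loops above follow the same pattern, adding each row of a sequential CSR product
   into the parallel C with ADD_VALUES; owned rows map through rstart + i, off-process rows
   through a global-index array such as p->garray:

     static PetscErrorCode AddSeqAIJRowsToMPI(Mat C, Mat Cseq, const PetscInt *rowmap)
     {
       Mat_SeqAIJ *cs = (Mat_SeqAIJ *)Cseq->data;
       PetscInt    i, m = Cseq->rmap->N;

       PetscFunctionBegin;
       for (i = 0; i < m; i++) {
         PetscInt ncols = cs->i[i + 1] - cs->i[i]; // row i occupies cs->j/cs->a[cs->i[i] .. cs->i[i+1])
         PetscCall(MatSetValues(C, 1, &rowmap[i], ncols, cs->j + cs->i[i], cs->a + cs->i[i], ADD_VALUES));
       }
       PetscFunctionReturn(PETSC_SUCCESS);
     }
*/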
1570 
1571 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1572 {
1573   MatMergeSeqsToMPI   *merge;
1574   Mat_MPIAIJ          *p  = (Mat_MPIAIJ *)P->data;
1575   Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data;
1576   MatProductCtx_APMPI *ap;
1577   PetscInt            *adj;
1578   PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
1579   MatScalar           *ada, *ca, valtmp;
1580   PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1581   MPI_Comm             comm;
1582   PetscMPIInt          size, rank, taga, *len_s, proc;
1583   PetscInt            *owners, nrows, **buf_ri_k, **nextrow, **nextci;
1584   PetscInt           **buf_ri, **buf_rj;
1585   PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1586   MPI_Request         *s_waits, *r_waits;
1587   MPI_Status          *status;
1588   MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
1589   const PetscScalar   *dummy;
1590   PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
1591   Mat                  A_loc;
1592   Mat_SeqAIJ          *a_loc;
1593 
1594   PetscFunctionBegin;
1595   MatCheckProduct(C, 3);
1596   ap = (MatProductCtx_APMPI *)C->product->data;
1597   PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1598   PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1599   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
1600   PetscCallMPI(MPI_Comm_size(comm, &size));
1601   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1602 
1603   merge = ap->merge;
1604 
1605   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1606   /* get data from symbolic products */
1607   coi = merge->coi;
1608   coj = merge->coj;
1609   PetscCall(PetscCalloc1(coi[pon], &coa));
1610   bi     = merge->bi;
1611   bj     = merge->bj;
1612   owners = merge->rowmap->range;
1613   PetscCall(PetscCalloc1(bi[cm], &ba));
1614 
1615   /* get A_loc by taking all local rows of A */
1616   A_loc = ap->A_loc;
1617   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
1618   a_loc = (Mat_SeqAIJ *)A_loc->data;
1619   ai    = a_loc->i;
1620   aj    = a_loc->j;
1621 
1622   /* trigger copy to CPU */
1623   PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
1624   PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
1625   PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
1626   PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
1627   for (i = 0; i < am; i++) {
1628     anz = ai[i + 1] - ai[i];
1629     adj = aj + ai[i];
1630     ada = a_loc->a + ai[i];
1631 
1632     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1633     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1634     pnz = po->i[i + 1] - po->i[i];
1635     poJ = po->j + po->i[i];
1636     pA  = po->a + po->i[i];
1637     for (j = 0; j < pnz; j++) {
1638       row = poJ[j];
1639       cj  = coj + coi[row];
1640       ca  = coa + coi[row];
1641       /* perform sparse axpy */
1642       nexta  = 0;
1643       valtmp = pA[j];
1644       for (k = 0; nexta < anz; k++) {
1645         if (cj[k] == adj[nexta]) {
1646           ca[k] += valtmp * ada[nexta];
1647           nexta++;
1648         }
1649       }
1650       PetscCall(PetscLogFlops(2.0 * anz));
1651     }
1652 
1653     /* put the value into Cd (diagonal part) */
1654     pnz = pd->i[i + 1] - pd->i[i];
1655     pdJ = pd->j + pd->i[i];
1656     pA  = pd->a + pd->i[i];
1657     for (j = 0; j < pnz; j++) {
1658       row = pdJ[j];
1659       cj  = bj + bi[row];
1660       ca  = ba + bi[row];
1661       /* perform sparse axpy */
1662       nexta  = 0;
1663       valtmp = pA[j];
1664       for (k = 0; nexta < anz; k++) {
1665         if (cj[k] == adj[nexta]) {
1666           ca[k] += valtmp * ada[nexta];
1667           nexta++;
1668         }
1669       }
1670       PetscCall(PetscLogFlops(2.0 * anz));
1671     }
1672   }
1673 
1674   /* 3) send and recv matrix values coa */
1675   buf_ri = merge->buf_ri;
1676   buf_rj = merge->buf_rj;
1677   len_s  = merge->len_s;
1678   PetscCall(PetscCommGetNewTag(comm, &taga));
1679   PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
1680 
1681   PetscCall(PetscMalloc2(merge->nsend, &s_waits, size, &status));
1682   for (proc = 0, k = 0; proc < size; proc++) {
1683     if (!len_s[proc]) continue;
1684     i = merge->owners_co[proc];
1685     PetscCallMPI(MPIU_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
1686     k++;
1687   }
1688   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
1689   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
1690 
1691   PetscCall(PetscFree2(s_waits, status));
1692   PetscCall(PetscFree(r_waits));
1693   PetscCall(PetscFree(coa));
1694 
1695   /* 4) insert local Cseq and received values into Cmpi */
1696   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1697   for (k = 0; k < merge->nrecv; k++) {
1698     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1699     nrows       = *buf_ri_k[k];
1700     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1701     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1702   }
1703 
1704   for (i = 0; i < cm; i++) {
1705     row  = owners[rank] + i; /* global row index of C_seq */
1706     bj_i = bj + bi[i];       /* col indices of the i-th row of C */
1707     ba_i = ba + bi[i];
1708     bnz  = bi[i + 1] - bi[i];
1709     /* add received vals into ba */
1710     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1711       /* i-th row */
1712       if (i == *nextrow[k]) {
1713         cnz    = *(nextci[k] + 1) - *nextci[k];
1714         cj     = buf_rj[k] + *nextci[k];
1715         ca     = abuf_r[k] + *nextci[k];
1716         nextcj = 0;
1717         for (j = 0; nextcj < cnz; j++) {
1718           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1719             ba_i[j] += ca[nextcj++];
1720           }
1721         }
1722         nextrow[k]++;
1723         nextci[k]++;
1724         PetscCall(PetscLogFlops(2.0 * cnz));
1725       }
1726     }
1727     PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
1728   }
1729   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1730   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1731 
1732   PetscCall(PetscFree(ba));
1733   PetscCall(PetscFree(abuf_r[0]));
1734   PetscCall(PetscFree(abuf_r));
1735   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1736   PetscFunctionReturn(PETSC_SUCCESS);
1737 }
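
/*
   Kernel sketch (illustrative only; SparseAXPY is a hypothetical name): the two "sparse
   axpy" loops above exploit the fact that the symbolic column pattern cj of the destination
   row is a sorted superset of the sorted source pattern adj, so a single merge pass
   accumulates val*ada into ca:

     static inline void SparseAXPY(PetscInt anz, const PetscInt *adj, const MatScalar *ada, MatScalar val, const PetscInt *cj, MatScalar *ca)
     {
       PetscInt k, nexta = 0;

       for (k = 0; nexta < anz; k++) { // advance k until every entry of adj[] has been matched
         if (cj[k] == adj[nexta]) ca[k] += val * ada[nexta++];
       }
     }
*/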
1738 
1739 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1740 {
1741   Mat                  A_loc;
1742   MatProductCtx_APMPI *ap;
1743   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
1744   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1745   PetscInt            *pdti, *pdtj, *poti, *potj, *ptJ;
1746   PetscInt             nnz;
1747   PetscInt            *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1748   PetscInt             am = A->rmap->n, pn = P->cmap->n;
1749   MPI_Comm             comm;
1750   PetscMPIInt          size, rank, tagi, tagj, *len_si, *len_s, *len_ri, proc;
1751   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
1752   PetscInt             len, *dnz, *onz, *owners;
1753   PetscInt             nzi, *bi, *bj;
1754   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1755   MPI_Request         *swaits, *rwaits;
1756   MPI_Status          *sstatus, rstatus;
1757   MatMergeSeqsToMPI   *merge;
1758   PetscInt            *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1759   PetscReal            afill  = 1.0, afill_tmp;
1760   PetscInt             rstart = P->cmap->rstart, rmax, Armax;
1761   Mat_SeqAIJ          *a_loc;
1762   PetscHMapI           ta;
1763   MatType              mtype;
1764 
1765   PetscFunctionBegin;
1766   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1767   /* check if matrix local sizes are compatible */
1768   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart,
1769              A->rmap->rend, P->rmap->rstart, P->rmap->rend);
1770 
1771   PetscCallMPI(MPI_Comm_size(comm, &size));
1772   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1773 
1774   /* create struct MatProductCtx_APMPI and attach it to C later */
1775   PetscCall(PetscNew(&ap));
1776 
1777   /* get A_loc by taking all local rows of A */
1778   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));
1779 
1780   ap->A_loc = A_loc;
1781   a_loc     = (Mat_SeqAIJ *)A_loc->data;
1782   ai        = a_loc->i;
1783   aj        = a_loc->j;
1784 
1785   /* determine symbolic Co=(p->B)^T*A - send to others */
1786   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
1787   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
1788   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1789                          >= (num of nonzero rows of C_seq) - pn */
1790   PetscCall(PetscMalloc1(pon + 1, &coi));
1791   coi[0] = 0;
1792 
1793   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1794   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1795   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1796   current_space = free_space;
1797 
1798   /* create a hash table to estimate Armax, then a condensed linked list of that size */
1799   PetscCall(PetscHMapICreateWithSize(A->cmap->n + a->B->cmap->N, &ta));
1800   MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1801   PetscCall(PetscHMapIGetSize(ta, &Armax));
1802 
1803   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1804 
1805   for (i = 0; i < pon; i++) {
1806     pnz = poti[i + 1] - poti[i];
1807     ptJ = potj + poti[i];
1808     for (j = 0; j < pnz; j++) {
1809       row  = ptJ[j]; /* row of A_loc == col of Pot */
1810       anz  = ai[row + 1] - ai[row];
1811       Jptr = aj + ai[row];
1812       /* add non-zero cols of AP into the sorted linked list lnk */
1813       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1814     }
1815     nnz = lnk[0];
1816 
1817     /* If free space is not available, double the total space in the list */
1818     if (current_space->local_remaining < nnz) {
1819       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1820       nspacedouble++;
1821     }
1822 
1823     /* copy data into free space, then reinitialize lnk */
1824     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
1825 
1826     current_space->array += nnz;
1827     current_space->local_used += nnz;
1828     current_space->local_remaining -= nnz;
1829 
1830     coi[i + 1] = coi[i] + nnz;
1831   }
1832 
1833   PetscCall(PetscMalloc1(coi[pon], &coj));
1834   PetscCall(PetscFreeSpaceContiguous(&free_space, coj));
1835   PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */
1836 
1837   afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1838   if (afill_tmp > afill) afill = afill_tmp;
1839 
1840   /* send j-array (coj) of Co to other processors */
1841   /* determine row ownership */
1842   PetscCall(PetscNew(&merge));
1843   PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
1844 
1845   merge->rowmap->n  = pn;
1846   merge->rowmap->bs = 1;
1847 
1848   PetscCall(PetscLayoutSetUp(merge->rowmap));
1849   owners = merge->rowmap->range;
1850 
1851   /* determine the number of messages to send, their lengths */
1852   PetscCall(PetscCalloc1(size, &len_si));
1853   PetscCall(PetscCalloc1(size, &merge->len_s));
1854 
1855   len_s        = merge->len_s;
1856   merge->nsend = 0;
1857 
1858   PetscCall(PetscMalloc1(size + 1, &owners_co));
1859 
1860   proc = 0;
1861   for (i = 0; i < pon; i++) {
1862     while (prmap[i] >= owners[proc + 1]) proc++;
1863     len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1864     len_s[proc] += coi[i + 1] - coi[i];
1865   }
1866 
1867   len          = 0; /* total length of buf_s[] */
1868   owners_co[0] = 0;
1869   for (proc = 0; proc < size; proc++) {
1870     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1871     if (len_s[proc]) {
1872       merge->nsend++;
1873       len_si[proc] = 2 * (len_si[proc] + 1);
1874       len += len_si[proc];
1875     }
1876   }
1877 
1878   /* determine the number and length of messages to receive for coi and coj  */
1879   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
1880   PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
1881 
1882   /* post the Irecv and Isend of coj */
1883   PetscCall(PetscCommGetNewTag(comm, &tagj));
1884   PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits));
1885   PetscCall(PetscMalloc1(merge->nsend, &swaits));
1886   for (proc = 0, k = 0; proc < size; proc++) {
1887     if (!len_s[proc]) continue;
1888     i = owners_co[proc];
1889     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1890     k++;
1891   }
1892 
1893   /* receives and sends of coj are complete */
1894   PetscCall(PetscMalloc1(size, &sstatus));
1895   for (i = 0; i < merge->nrecv; i++) {
1896     PETSC_UNUSED PetscMPIInt icompleted;
1897     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1898   }
1899   PetscCall(PetscFree(rwaits));
1900   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1901 
1902   /* add received column indices into table to update Armax */
1903   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1904   for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1905     Jptr = buf_rj[k];
1906     for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1907   }
1908   PetscCall(PetscHMapIGetSize(ta, &Armax));
1909 
1910   /* send and recv coi */
1911   PetscCall(PetscCommGetNewTag(comm, &tagi));
1912   PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits));
1913   PetscCall(PetscMalloc1(len, &buf_s));
1914   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1915   for (proc = 0, k = 0; proc < size; proc++) {
1916     if (!len_s[proc]) continue;
1917     /* form outgoing message for i-structure:
1918          buf_si[0]:                 nrows to be sent
1919                [1:nrows]:           row index (global)
1920                [nrows+1:2*nrows+1]: i-structure index
1921     */
1922     nrows       = len_si[proc] / 2 - 1;
1923     buf_si_i    = buf_si + nrows + 1;
1924     buf_si[0]   = nrows;
1925     buf_si_i[0] = 0;
1926     nrows       = 0;
1927     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1928       nzi                 = coi[i + 1] - coi[i];
1929       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1930       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1931       nrows++;
1932     }
1933     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1934     k++;
1935     buf_si += len_si[proc];
1936   }
1937   i = merge->nrecv;
1938   while (i--) {
1939     PETSC_UNUSED PetscMPIInt icompleted;
1940     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1941   }
1942   PetscCall(PetscFree(rwaits));
1943   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1944   PetscCall(PetscFree(len_si));
1945   PetscCall(PetscFree(len_ri));
1946   PetscCall(PetscFree(swaits));
1947   PetscCall(PetscFree(sstatus));
1948   PetscCall(PetscFree(buf_s));
1949 
1950   /* compute the local portion of C (mpi mat) */
1951   /* allocate bi array and free space for accumulating nonzero column info */
1952   PetscCall(PetscMalloc1(pn + 1, &bi));
1953   bi[0] = 0;
1954 
1955   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1956   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1957   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1958   current_space = free_space;
1959 
1960   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1961   for (k = 0; k < merge->nrecv; k++) {
1962     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1963     nrows       = *buf_ri_k[k];
1964     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1965     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1966   }
1967 
1968   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1969   MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1970   rmax = 0;
1971   for (i = 0; i < pn; i++) {
1972     /* add pdt[i,:]*AP into lnk */
1973     pnz = pdti[i + 1] - pdti[i];
1974     ptJ = pdtj + pdti[i];
1975     for (j = 0; j < pnz; j++) {
1976       row  = ptJ[j]; /* row of AP == col of Pt */
1977       anz  = ai[row + 1] - ai[row];
1978       Jptr = aj + ai[row];
1979       /* add non-zero cols of AP into the sorted linked list lnk */
1980       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1981     }
1982 
1983     /* add received col data into lnk */
1984     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1985       if (i == *nextrow[k]) {            /* i-th row */
1986         nzi  = *(nextci[k] + 1) - *nextci[k];
1987         Jptr = buf_rj[k] + *nextci[k];
1988         PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk));
1989         nextrow[k]++;
1990         nextci[k]++;
1991       }
1992     }
1993 
1994     /* add missing diagonal entry */
1995     if (C->force_diagonals) {
1996       k = i + owners[rank]; /* column index */
1997       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk));
1998     }
1999 
2000     nnz = lnk[0];
2001 
2002     /* if free space is not available, make more free space */
2003     if (current_space->local_remaining < nnz) {
2004       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
2005       nspacedouble++;
2006     }
2007     /* copy data into free space, then initialize lnk */
2008     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
2009     PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz));
2010 
2011     current_space->array += nnz;
2012     current_space->local_used += nnz;
2013     current_space->local_remaining -= nnz;
2014 
2015     bi[i + 1] = bi[i] + nnz;
2016     if (nnz > rmax) rmax = nnz;
2017   }
2018   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
2019 
2020   PetscCall(PetscMalloc1(bi[pn], &bj));
2021   PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
2022   afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2023   if (afill_tmp > afill) afill = afill_tmp;
2024   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2025   PetscCall(PetscHMapIDestroy(&ta));
2026   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
2027   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
2028 
2029   /* create symbolic parallel matrix C (TODO: why can it not be assembled in the numeric part?) */
2030   PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE));
2031   PetscCall(MatSetBlockSizes(C, P->cmap->bs, A->cmap->bs));
2032   PetscCall(MatGetType(A, &mtype));
2033   PetscCall(MatSetType(C, mtype));
2034   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
2035   MatPreallocateEnd(dnz, onz);
2036   PetscCall(MatSetBlockSize(C, 1));
2037   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
2038   for (i = 0; i < pn; i++) {
2039     row  = i + rstart;
2040     nnz  = bi[i + 1] - bi[i];
2041     Jptr = bj + bi[i];
2042     PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES));
2043   }
2044   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
2045   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
2046   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
2047   merge->bi        = bi;
2048   merge->bj        = bj;
2049   merge->coi       = coi;
2050   merge->coj       = coj;
2051   merge->buf_ri    = buf_ri;
2052   merge->buf_rj    = buf_rj;
2053   merge->owners_co = owners_co;
2054 
2055   /* attach the supporting struct to C for reuse */
2056   C->product->data    = ap;
2057   C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2058   ap->merge           = merge;
2059 
2060   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2061 
2062 #if defined(PETSC_USE_INFO)
2063   if (bi[pn] != 0) {
2064     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
2065     PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
2066   } else {
2067     PetscCall(PetscInfo(C, "Empty matrix product\n"));
2068   }
2069 #endif
2070   PetscFunctionReturn(PETSC_SUCCESS);
2071 }
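
/*
   Diagnostic sketch (hedged): the PetscInfo output emitted above can be used to tune the
   fill estimate. Run once with -info, look for the "Fill ratio: given ... needed ..." line,
   then pass the reported "needed" value (2.0 below is a made-up example) as the fill
   argument of a subsequent call:

     mpiexec -n 4 ./app -info | grep "Fill ratio"
     // then, in the application:
     PetscCall(MatTransposeMatMult(P, A, MAT_INITIAL_MATRIX, 2.0, &C));
*/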
2072 
2073 static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2074 {
2075   Mat_Product *product = C->product;
2076   Mat          A = product->A, B = product->B;
2077   PetscReal    fill = product->fill;
2078   PetscBool    flg;
2079 
2080   PetscFunctionBegin;
2081   /* scalable */
2082   PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
2083   if (flg) {
2084     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
2085     goto next;
2086   }
2087 
2088   /* nonscalable */
2089   PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
2090   if (flg) {
2091     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
2092     goto next;
2093   }
2094 
2095   /* at*b: form At explicitly, then reuse the AB (matmatmult) kernels */
2096   PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
2097   if (flg) {
2098     Mat                  At;
2099     MatProductCtx_APMPI *ptap;
2100 
2101     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
2102     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
2103     ptap = (MatProductCtx_APMPI *)C->product->data;
2104     if (ptap) {
2105       ptap->Pt            = At;
2106       C->product->destroy = MatProductCtxDestroy_MPIAIJ_PtAP;
2107     }
2108     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2109     goto next;
2110   }
2111 
2112   /* backend general code */
2113   PetscCall(PetscStrcmp(product->alg, "backend", &flg));
2114   if (flg) {
2115     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2116     PetscFunctionReturn(PETSC_SUCCESS);
2117   }
2118 
2119   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct algorithm is not supported");
2120 
2121 next:
2122   C->ops->productnumeric = MatProductNumeric_AtB;
2123   PetscFunctionReturn(PETSC_SUCCESS);
2124 }
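
/*
   Equivalence sketch (hedged): the "at*b" branch above is conceptually what a user could do
   by hand, forming the transpose once and falling back on the AB kernels; the branch merely
   caches At in the product context so that reuse avoids re-forming it:

     Mat At, C;
     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
     PetscCall(MatMatMult(At, B, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C));
     PetscCall(MatDestroy(&At));
*/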
2125 
2126 /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2127 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2128 {
2129   Mat_Product *product = C->product;
2130   Mat          A = product->A, B = product->B;
2131 #if defined(PETSC_HAVE_HYPRE)
2132   const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2133   PetscInt    nalg        = 5;
2134 #else
2135   const char *algTypes[4] = {
2136     "scalable",
2137     "nonscalable",
2138     "seqmpi",
2139     "backend",
2140   };
2141   PetscInt nalg = 4;
2142 #endif
2143   PetscInt  alg = 1; /* set nonscalable algorithm as default */
2144   PetscBool flg;
2145   MPI_Comm  comm;
2146 
2147   PetscFunctionBegin;
2148   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2149 
2150   /* Set "nonscalable" as default algorithm */
2151   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2152   if (flg) {
2153     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2154 
2155     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2156     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2157       MatInfo   Ainfo, Binfo;
2158       PetscInt  nz_local;
2159       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2160 
2161       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2162       PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2163       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2164 
2165       if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2166       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2167 
2168       if (alg_scalable) {
2169         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2170         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2171         PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2172       }
2173     }
2174   }
2175 
2176   /* Get runtime option */
2177   if (product->api_user) {
2178     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2179     PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2180     PetscOptionsEnd();
2181   } else {
2182     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2183     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2184     PetscOptionsEnd();
2185   }
2186   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2187 
2188   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2189   PetscFunctionReturn(PETSC_SUCCESS);
2190 }
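
/*
   Runtime selection sketch: with the options registered above, the AB algorithm can be
   switched on the command line; which spelling is read depends on whether the product was
   created through the MatMatMult() API (api_user) or through MatProduct:

     ./app -matmatmult_via scalable           # api_user path
     ./app -mat_product_algorithm seqmpi      # MatProduct path
*/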
2191 
2192 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2193 {
2194   PetscFunctionBegin;
2195   PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2196   C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2197   PetscFunctionReturn(PETSC_SUCCESS);
2198 }
2199 
2200 /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2201 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2202 {
2203   Mat_Product *product = C->product;
2204   Mat          A = product->A, B = product->B;
2205   const char  *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2206   PetscInt     nalg        = 4;
2207   PetscInt     alg         = 1; /* set default algorithm  */
2208   PetscBool    flg;
2209   MPI_Comm     comm;
2210 
2211   PetscFunctionBegin;
2212   /* Check matrix local sizes */
2213   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2214   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2215              A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
2216 
2217   /* Set default algorithm */
2218   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2219   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2220 
2221   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2222   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2223     MatInfo   Ainfo, Binfo;
2224     PetscInt  nz_local;
2225     PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2226 
2227     PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2228     PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2229     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2230 
2231     if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2232     PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2233 
2234     if (alg_scalable) {
2235       alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2236       PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2237       PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2238     }
2239   }
2240 
2241   /* Get runtime option */
2242   if (product->api_user) {
2243     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2244     PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2245     PetscOptionsEnd();
2246   } else {
2247     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2248     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2249     PetscOptionsEnd();
2250   }
2251   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2252 
2253   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2254   PetscFunctionReturn(PETSC_SUCCESS);
2255 }
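
/*
   Runtime selection sketch for AtB (the quotes keep the shell from globbing "at*b"):

     ./app -mattransposematmult_via "at*b"    # api_user path
     ./app -mat_product_algorithm scalable    # MatProduct path
*/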
2256 
2257 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2258 {
2259   Mat_Product *product = C->product;
2260   Mat          A = product->A, P = product->B;
2261   MPI_Comm     comm;
2262   PetscBool    flg;
2263   PetscInt     alg = 1; /* set default algorithm */
2264 #if !defined(PETSC_HAVE_HYPRE)
2265   const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2266   PetscInt    nalg        = 5;
2267 #else
2268   const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2269   PetscInt    nalg        = 6;
2270 #endif
2271   PetscInt pN = P->cmap->N;
2272 
2273   PetscFunctionBegin;
2274   /* Check matrix local sizes */
2275   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2276   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2277              A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2278   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2279              A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);
2280 
2281   /* Set "nonscalable" as default algorithm */
2282   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2283   if (flg) {
2284     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2285 
2286     /* Set "scalable" as default if PN and local nonzeros of A and P are large */
2287     if (pN > 100000) {
2288       MatInfo   Ainfo, Pinfo;
2289       PetscInt  nz_local;
2290       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2291 
2292       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2293       PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
2294       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2295 
2296       if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2297       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2298 
2299       if (alg_scalable) {
2300         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2301         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2302       }
2303     }
2304   }
2305 
2306   /* Get runtime option */
2307   if (product->api_user) {
2308     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2309     PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2310     PetscOptionsEnd();
2311   } else {
2312     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2313     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2314     PetscOptionsEnd();
2315   }
2316   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2317 
2318   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2319   PetscFunctionReturn(PETSC_SUCCESS);
2320 }
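
/*
   Usage sketch (hypothetical driver): the PtAP product configured above is normally reached
   through MatPtAP(), assuming an assembled square MPIAIJ A and an MPIAIJ P whose row layout
   matches both the rows and the columns of A:

     Mat C;
     PetscCall(MatPtAP(A, P, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C)); // C = Pt*A*P
     // ... update the values of A, same nonzero pattern ...
     PetscCall(MatPtAP(A, P, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C));
     PetscCall(MatDestroy(&C));

   and the algorithm can be switched with, e.g., -matptap_via allatonce.
*/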
2321 
2322 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2323 {
2324   Mat_Product *product = C->product;
2325   Mat          A = product->A, R = product->B;
2326 
2327   PetscFunctionBegin;
2328   /* Check matrix local sizes */
2329   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n,
2330              A->cmap->n, R->rmap->n, R->cmap->n);
2331 
2332   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2333   PetscFunctionReturn(PETSC_SUCCESS);
2334 }
2335 
2336 /*
2337  Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from among AB's algorithms
2338 */
2339 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2340 {
2341   Mat_Product *product     = C->product;
2342   PetscBool    flg         = PETSC_FALSE;
2343   PetscInt     alg         = 1; /* default algorithm */
2344   const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2345   PetscInt     nalg        = 3;
2346 
2347   PetscFunctionBegin;
2348   /* Set default algorithm */
2349   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2350   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2351 
2352   /* Get runtime option */
2353   if (product->api_user) {
2354     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2355     PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2356     PetscOptionsEnd();
2357   } else {
2358     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2359     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
2360     PetscOptionsEnd();
2361   }
2362   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2363 
2364   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2365   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2366   PetscFunctionReturn(PETSC_SUCCESS);
2367 }
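
/*
   Usage sketch (hypothetical driver): the ABC product maps to MatMatMatMult(); with the
   setup above, the triple product is evaluated as A*(B*C) using the selected AB algorithm:

     Mat D;
     PetscCall(MatMatMatMult(A, B, C, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &D));
     PetscCall(MatDestroy(&D));
*/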
2368 
2369 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2370 {
2371   Mat_Product *product = C->product;
2372 
2373   PetscFunctionBegin;
2374   switch (product->type) {
2375   case MATPRODUCT_AB:
2376     PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2377     break;
2378   case MATPRODUCT_ABt:
2379     PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
2380     break;
2381   case MATPRODUCT_AtB:
2382     PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2383     break;
2384   case MATPRODUCT_PtAP:
2385     PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2386     break;
2387   case MATPRODUCT_RARt:
2388     PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2389     break;
2390   case MATPRODUCT_ABC:
2391     PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2392     break;
2393   default:
2394     break;
2395   }
2396   PetscFunctionReturn(PETSC_SUCCESS);
2397 }
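
/*
   Dispatch sketch (hedged): MatProductSetFromOptions_MPIAIJ() is the type-specific hook run
   by the generic MatProductSetFromOptions(); a typical MatProduct sequence that lands in
   the switch above for two MPIAIJ matrices is

     Mat D;
     PetscCall(MatProductCreate(A, B, NULL, &D));
     PetscCall(MatProductSetType(D, MATPRODUCT_AB)); // or MATPRODUCT_AtB, _PtAP, _RARt, _ABC
     PetscCall(MatProductSetFromOptions(D));         // dispatches through the switch above
     PetscCall(MatProductSymbolic(D));
     PetscCall(MatProductNumeric(D));
     PetscCall(MatDestroy(&D));
*/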
2398