/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
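
/*
  Usage note (an editorial sketch, not part of the PETSc source): the kernels in this
  file are reached through the public MatMatMult()/MatProduct interface, e.g.

.vb
  Mat C;
  PetscCall(MatProductCreate(A, B, NULL, &C));
  PetscCall(MatProductSetType(C, MATPRODUCT_AB));
  PetscCall(MatProductSetAlgorithm(C, "scalable"));   (or "nonscalable", "seqmpi", ...)
  PetscCall(MatProductSetFromOptions(C));
  PetscCall(MatProductSymbolic(C));
  PetscCall(MatProductNumeric(C));
.ve

  or simply MatMatMult(A, B, MAT_INITIAL_MATRIX, fill, &C).
*/
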
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc/private/vecimpl.h>
#include <petsc/private/sfimpl.h>

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
#endif

PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
{
  Mat_Product *product = C->product;
  Mat          B       = product->B;

  PetscFunctionBegin;
  PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B));
  PetscCall(MatDestroy(&B));
  PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
{
  Mat_Product        *product = C->product;
  Mat                 A = product->A, B = product->B;
  MatProductAlgorithm alg  = product->alg;
  PetscReal           fill = product->fill;
  PetscBool           flg;

  PetscFunctionBegin;
  /* scalable */
  PetscCall(PetscStrcmp(alg, "scalable", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* nonscalable */
  PetscCall(PetscStrcmp(alg, "nonscalable", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* seqmpi */
  PetscCall(PetscStrcmp(alg, "seqmpi", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  /* backend general code */
  PetscCall(PetscStrcmp(alg, "backend", &flg));
  if (flg) {
    PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }

#if defined(PETSC_HAVE_HYPRE)
  PetscCall(PetscStrcmp(alg, "hypre", &flg));
  if (flg) {
    PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
#endif
  SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported");
}
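
/*
  The algorithm string tested above normally comes from the options database; a
  minimal sketch of choosing it at run time (option name as in current PETSc,
  older releases used -matproduct_ab_via):

.vb
  -mat_product_algorithm scalable    (or nonscalable, seqmpi, backend, hypre)
.ve
*/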

PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
{
  Mat_APMPI *ptap = (Mat_APMPI *)data;

  PetscFunctionBegin;
  PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r));
  PetscCall(PetscFree(ptap->bufa));
  PetscCall(MatDestroy(&ptap->P_loc));
  PetscCall(MatDestroy(&ptap->P_oth));
  PetscCall(MatDestroy(&ptap->Pt));
  PetscCall(PetscFree(ptap->api));
  PetscCall(PetscFree(ptap->apj));
  PetscCall(PetscFree(ptap->apa));
  PetscCall(PetscFree(ptap));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
{
  Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
  Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
  PetscScalar       *cda, *coa;
  Mat_SeqAIJ        *p_loc, *p_oth;
  PetscScalar       *apa, *ca;
  PetscInt           cm = C->rmap->n;
  Mat_APMPI         *ptap;
  PetscInt          *api, *apj, *apJ, i, k;
  PetscInt           cstart = C->cmap->rstart;
  PetscInt           cdnz, conz, k0, k1;
  const PetscScalar *dummy1, *dummy2, *dummy3, *dummy4;
  MPI_Comm           comm;
  PetscMPIInt        size;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (Mat_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

  /* flag CPU mask for C */
#if defined(PETSC_HAVE_DEVICE)
  if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
#endif

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /* update numerical values of P_oth and P_loc */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
  p_oth = NULL;
  if (size > 1) p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  /* trigger copy to CPU */
  PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy1));
  PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy2));
  PetscCall(MatSeqAIJGetArrayRead(ptap->P_loc, &dummy3));
  if (ptap->P_oth) PetscCall(MatSeqAIJGetArrayRead(ptap->P_oth, &dummy4));
  PetscCall(MatSeqAIJGetArrayWrite(c->A, &cda));
  PetscCall(MatSeqAIJGetArrayWrite(c->B, &coa));
  for (i = 0; i < cm; i++) {
    /* compute apa = A[i,:]*P */
    AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);

    /* set values in C */
    apJ  = PetscSafePointerPlusOffset(apj, api[i]);
    cdnz = cd->i[i + 1] - cd->i[i];
    conz = co->i[i + 1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    k  = 0;
    for (k0 = 0; k0 < conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* diagonal part of C */
    ca = PetscSafePointerPlusOffset(cda, cd->i[i]);
    for (k1 = 0; k1 < cdnz; k1++) {
      ca[k1]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* 2nd off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    for (; k0 < conz; k0++) {
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }
  }
  PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy1));
  PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy2));
  PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_loc, &dummy3));
  if (ptap->P_oth) PetscCall(MatSeqAIJRestoreArrayRead(ptap->P_oth, &dummy4));
  PetscCall(MatSeqAIJRestoreArrayWrite(c->A, &cda));
  PetscCall(MatSeqAIJRestoreArrayWrite(c->B, &coa));

  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}
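
/*
  For orientation (editorial sketch): AProw_nonscalable() used above performs the
  classical dense-accumulator product of one sparse row. With ai/aj/aa standing for
  a generic CSR triple (illustrative names only) it amounts to

.vb
  for (j = ai[i]; j < ai[i+1]; j++) {        (nonzeros of row i of A)
    v = aa[j];
    r = aj[j];                               (selects row r of P)
    for (k = pi[r]; k < pi[r+1]; k++) apa[pj[k]] += v * pa[k];
  }
.ve

  after which the loop above gathers apa[] into the preset sparsity pattern of
  row i of C and zeroes the entries it used.
*/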

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat_APMPI         *ptap;
  PetscFreeSpaceList free_space = NULL, current_space = NULL;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
  PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
  PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
  PetscInt          *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
  PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
  PetscBT            lnkbt;
  PetscReal          afill;
  MatType            mtype;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* create struct Mat_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  PetscCall(PetscMalloc1(am + 1, &api));
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
  current_space = free_space;

  MatPreallocateBegin(comm, am, pn, dnz, onz);
  for (i = 0; i < am; i++) {
    /* diagonal portion of A */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row + 1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
    }
    /* off-diagonal portion of A */
    nzi = aoi[i + 1] - aoi[i];
    for (j = 0; j < nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row + 1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
    }
    /* add possible missing diagonal entry */
    if (C->force_diagonals) {
      j = i + rstart; /* column index */
      PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt));
    }

    apnz       = lnk[0];
    api[i + 1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt));
    PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

    current_space->array += apnz;
    current_space->local_used += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj and initialize it; destroy the list of free space and other temporary arrays */
  PetscCall(PetscMalloc1(api[am], &ptap->apj));
  apj = ptap->apj;
  PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
  PetscCall(PetscLLDestroy(lnk, lnkbt));

  /* malloc apa to store dense row A[i,:]*P */
  PetscCall(PetscCalloc1(pN, &ptap->apa));

  /* set and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));

  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
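
/*
  A sketch of how a caller can recover the fill statistics set above, so that a
  better fill estimate can be passed to the next symbolic product:

.vb
  MatInfo info;
  PetscCall(MatGetInfo(C, MAT_LOCAL, &info));
  (info.fill_ratio_needed is afill; info.mallocs counts the free-space doublings)
.ve
*/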

static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);

  C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
  C->ops->productsymbolic = MatProductSymbolic_AB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
{
  Mat_Product *product = C->product;
  Mat          A = product->A, B = product->B;

  PetscFunctionBegin;
  if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

  C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
  C->ops->productsymbolic          = MatProductSymbolic_AtB;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
{
  Mat_Product *product = C->product;

  PetscFunctionBegin;
  switch (product->type) {
  case MATPRODUCT_AB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
    break;
  case MATPRODUCT_AtB:
    PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
    break;
  default:
    break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

typedef struct {
  Mat           workB, workB1;   /* sequential work matrices holding the needed off-process rows of B */
  MPI_Request  *rwaits, *swaits; /* receive/send request arrays for the row scatter */
  PetscInt      nsends, nrecvs;
  MPI_Datatype *stype, *rtype;   /* derived datatypes selecting rows of a column-major dense array */
  PetscInt      blda;            /* leading dimension of B for which stype was created */
} MPIAIJ_MPIDense;

static PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense *)ctx;
  PetscInt         i;

  PetscFunctionBegin;
  PetscCall(MatDestroy(&contents->workB));
  PetscCall(MatDestroy(&contents->workB1));
  for (i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i]));
  for (i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
  PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits));
  PetscCall(PetscFree(contents));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
{
  Mat_MPIAIJ      *aij = (Mat_MPIAIJ *)A->data;
  PetscInt         nz  = aij->B->cmap->n, blda, m, M, n, N;
  MPIAIJ_MPIDense *contents;
  VecScatter       ctx = aij->Mvctx;
  PetscInt         Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, numBb;
  MPI_Comm         comm;
  MPI_Datatype     type1, *stype, *rtype;
  const PetscInt  *sindices, *sstarts, *rstarts;
  PetscMPIInt     *disp, nsends, nrecvs, nrows_to, nrows_from;
  PetscBool        cisdense;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense));
  if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name));
  PetscCall(MatGetLocalSize(C, &m, &n));
  PetscCall(MatGetSize(C, &M, &N));
  if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN));
  PetscCall(MatSetBlockSizesFromMats(C, A, B));
  PetscCall(MatSetUp(C));
  PetscCall(MatDenseGetLDA(B, &blda));
  PetscCall(PetscNew(&contents));

  PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
  PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));

  /* Create column block of B and C for memory scalability when BN is too large */
  /* Estimate Bbn, column size of Bb */
  if (nz) {
    Bbn1 = 2 * Am * BN / nz;
    if (!Bbn1) Bbn1 = 1;
  } else Bbn1 = BN;

  bs   = B->cmap->bs;
  Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
  if (Bbn1 > BN) Bbn1 = BN;
  PetscCallMPI(MPIU_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm));

  /* Enable runtime option for Bbn */
  PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatMatMult", "Mat");
  PetscCall(PetscOptionsInt("-matmatmult_Bbn", "Number of columns in Bb", "MatMatMult", Bbn, &Bbn, NULL));
  PetscOptionsEnd();
  Bbn = PetscMin(Bbn, BN);

  if (Bbn > 0 && Bbn < BN) {
    numBb = BN / Bbn;
    Bbn1  = BN - numBb * Bbn;
  } else numBb = 0;

  if (numBb) {
    PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb));
    if (Bbn1) { /* Create workB1 for the remaining columns */
      PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1));
      /* Create work matrix used to store off processor rows of B needed for local product */
      PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1));
    } else contents->workB1 = NULL;
  }

  /* Create work matrix used to store off processor rows of B needed for local product */
  PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB));

  /* Use MPI derived data type to reduce memory required by the send/recv buffers */
  PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits));
  contents->stype  = stype;
  contents->nsends = nsends;

  contents->rtype  = rtype;
  contents->nrecvs = nrecvs;
  contents->blda   = blda;

  PetscCall(PetscMalloc1(Bm + 1, &disp));
  for (PetscMPIInt i = 0; i < nsends; i++) {
    PetscCall(PetscMPIIntCast(sstarts[i + 1] - sstarts[i], &nrows_to));
    for (PetscInt j = 0; j < nrows_to; j++) PetscCall(PetscMPIIntCast(sindices[sstarts[i] + j], &disp[j])); /* rowB to be sent */
    PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1));
    PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]));
    PetscCallMPI(MPI_Type_commit(&stype[i]));
    PetscCallMPI(MPI_Type_free(&type1));
  }

  for (PetscMPIInt i = 0; i < nrecvs; i++) {
    /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
    PetscCall(PetscMPIIntCast(rstarts[i + 1] - rstarts[i], &nrows_from));
    disp[0] = 0;
    PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1));
    PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]));
    PetscCallMPI(MPI_Type_commit(&rtype[i]));
    PetscCallMPI(MPI_Type_free(&type1));
  }

  PetscCall(PetscFree(disp));
  PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
  PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
  PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));

  C->product->data       = contents;
  C->product->destroy    = MatMPIAIJ_MPIDenseDestroy;
  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
  PetscFunctionReturn(PETSC_SUCCESS);
}
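
/*
  Why the sends/recvs below can use count = ncols (editorial sketch): each stype[i]
  selects the needed rows within a single column of the column-major array b, and
  the resized extent blda*sizeof(PetscScalar) shifts the type to the same rows of
  the next column:

.vb
  MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1);
  MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]);
  (MPI_Isend(b, ncols, stype[i], ...) then ships those rows from all ncols columns)
.ve
*/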

PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);

/*
    Performs an efficient scatter of the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.

    Input: if Bbidx = 0 the scatter fills workB = contents->workB, otherwise
    workB = contents->workB1; see MatMatMultSymbolic_MPIAIJ_MPIDense()
*/

static PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
{
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
  const PetscScalar *b;
  PetscScalar       *rvalues;
  VecScatter         ctx = aij->Mvctx;
  const PetscInt    *sindices, *sstarts, *rstarts;
  const PetscMPIInt *sprocs, *rprocs;
  PetscMPIInt        nsends, nrecvs;
  MPI_Request       *swaits, *rwaits;
  MPI_Comm           comm;
  PetscMPIInt        tag = ((PetscObject)ctx)->tag, ncols, nrows, nsends_mpi, nrecvs_mpi;
  MPIAIJ_MPIDense   *contents;
  Mat                workB;
  MPI_Datatype      *stype, *rtype;
  PetscInt           blda;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
  PetscCall(PetscMPIIntCast(B->cmap->N, &ncols));
  PetscCall(PetscMPIIntCast(aij->B->cmap->n, &nrows));
  contents = (MPIAIJ_MPIDense *)C->product->data;
  PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/));
  PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/));
  PetscCall(PetscMPIIntCast(nsends, &nsends_mpi));
  PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi));
  if (Bbidx == 0) workB = *outworkB = contents->workB;
  else workB = *outworkB = contents->workB1;
  PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->rmap->n, nrows);
  swaits = contents->swaits;
  rwaits = contents->rwaits;

  PetscCall(MatDenseGetArrayRead(B, &b));
  PetscCall(MatDenseGetLDA(B, &blda));
  PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda);
  PetscCall(MatDenseGetArray(workB, &rvalues));

  /* Post recv, use MPI derived data type to save memory */
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  rtype = contents->rtype;
  for (PetscMPIInt i = 0; i < nrecvs; i++) PetscCallMPI(MPIU_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i));

  stype = contents->stype;
  for (PetscMPIInt i = 0; i < nsends; i++) PetscCallMPI(MPIU_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i));

  if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE));
  if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE));

  PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL));
  PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL));
  PetscCall(MatDenseRestoreArrayRead(B, &b));
  PetscCall(MatDenseRestoreArray(workB, &rvalues));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
{
  Mat_MPIAIJ      *aij    = (Mat_MPIAIJ *)A->data;
  Mat_MPIDense    *bdense = (Mat_MPIDense *)B->data;
  Mat_MPIDense    *cdense = (Mat_MPIDense *)C->data;
  Mat              workB;
  MPIAIJ_MPIDense *contents;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
  contents = (MPIAIJ_MPIDense *)C->product->data;
  /* diagonal block of A times all local rows of B */
  /* TODO: this calls a symbolic multiplication every time, which could be avoided */
  PetscCall(MatMatMult(aij->A, bdense->A, MAT_REUSE_MATRIX, PETSC_CURRENT, &cdense->A));
  if (contents->workB->cmap->n == B->cmap->N) {
    /* get off processor parts of B needed to complete C=A*B */
    PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB));

    /* off-diagonal block of A times nonlocal rows of B */
    PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
  } else {
    Mat       Bb, Cb;
    PetscInt  BN = B->cmap->N, n = contents->workB->cmap->n;
    PetscBool ccpu;

    PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n);
    /* Prevent unneeded copies back and forth from the GPU
       when getting and restoring the submatrix.
       We need a proper GPU code for parallel AIJ * dense */
    PetscCall(MatBoundToCPU(C, &ccpu));
    PetscCall(MatBindToCPU(C, PETSC_TRUE));
    for (PetscInt i = 0; i < BN; i += n) {
      PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
      PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb));

      /* get off processor parts of B needed to complete C=A*B */
      PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB));

      /* off-diagonal block of A times nonlocal rows of B */
      cdense = (Mat_MPIDense *)Cb->data;
      PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
      PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
      PetscCall(MatDenseRestoreSubMatrix(C, &Cb));
    }
    PetscCall(MatBindToCPU(C, ccpu));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
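
/*
  The column blocking above is controlled by the -matmatmult_Bbn option registered
  in the symbolic phase; a sketch of forcing, say, 128-column blocks:

.vb
  -matmatmult_Bbn 128
.ve

  The full diagonal product Cd = Ad*Bd is still done in one sweep before the loop;
  each blocked pass only adds the off-process contribution Ao*B_oth for one group
  of columns.
*/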

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
{
  Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
  Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
  PetscInt          *adi = ad->i, *adj, *aoi = ao->i, *aoj;
  PetscScalar       *ada, *aoa, *cda = cd->a, *coa = co->a;
  Mat_SeqAIJ        *p_loc, *p_oth;
  PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
  PetscScalar       *pa_loc, *pa_oth, *pa, valtmp, *ca;
  PetscInt           cm = C->rmap->n, anz, pnz;
  Mat_APMPI         *ptap;
  PetscScalar       *apa_sparse;
  const PetscScalar *dummy;
  PetscInt          *api, *apj, *apJ, i, j, k, row;
  PetscInt           cstart = C->cmap->rstart;
  PetscInt           cdnz, conz, k0, k1, nextp;
  MPI_Comm           comm;
  PetscMPIInt        size;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (Mat_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");

  /* flag CPU mask for C */
#if defined(PETSC_HAVE_DEVICE)
  if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
  if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
#endif
  apa_sparse = ptap->apa;

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /* update numerical values of P_oth and P_loc */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  pa_loc = p_loc->a;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
    pa_oth = p_oth->a;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
    pa_oth = NULL;
  }

  /* trigger copy to CPU */
  PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
  PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
  PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
  api = ptap->api;
  apj = ptap->apj;
  for (i = 0; i < cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i + 1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j = 0; j < anz; j++) {
      row = adj[j];
      pnz = pi_loc[row + 1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k = 0; nextp < pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp * pa[nextp++];
        }
      }
      PetscCall(PetscLogFlops(2.0 * pnz));
    }

    /* off-diagonal portion of A */
    anz = aoi[i + 1] - aoi[i];
    aoj = PetscSafePointerPlusOffset(ao->j, aoi[i]);
    aoa = PetscSafePointerPlusOffset(ao->a, aoi[i]);
    for (j = 0; j < anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row + 1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k = 0; nextp < pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp * pa[nextp++];
        }
      }
      PetscCall(PetscLogFlops(2.0 * pnz));
    }

    /* set values in C */
    cdnz = cd->i[i + 1] - cd->i[i];
    conz = co->i[i + 1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    k  = 0;
    for (k0 = 0; k0 < conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1 = 0; k1 < cdnz; k1++) {
      ca[k1]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = PetscSafePointerPlusOffset(coa, co->i[i]);
    for (; k0 < conz; k0++) {
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(PETSC_SUCCESS);
}
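
/*
  The sparse axpy above relies on a symbolic-phase invariant: the (sorted) column
  pattern pj of each touched row of P is a subset of the sorted pattern apJ of the
  current row of AP, so a single forward scan matches every entry (sketch):

.vb
  nextp = 0;
  for (k = 0; nextp < pnz; k++) {            (k cannot run past the end of apJ)
    if (apJ[k] == pj[nextp]) apa_sparse[k] += valtmp * pa[nextp++];
  }
.ve
*/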

/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid the O(pN) dense-row memory requirement */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat_APMPI         *ptap;
  PetscFreeSpaceList free_space = NULL, current_space = NULL;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
  PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
  PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
  PetscInt           i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
  PetscInt           am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
  PetscReal          afill;
  MatType            mtype;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* create struct Mat_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;
  pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
    pi_oth = p_oth->i;
    pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL;
    pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  PetscCall(PetscMalloc1(am + 1, &api));
  ptap->api = api;
  api[0]    = 0;

  PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
  current_space = free_space;
  MatPreallocateBegin(comm, am, pn, dnz, onz);
  for (i = 0; i < am; i++) {
    /* diagonal portion of A */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row + 1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }
    /* off-diagonal portion of A */
    nzi = aoi[i + 1] - aoi[i];
    for (j = 0; j < nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row + 1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      /* Expand list if it is not long enough */
      if (pnz + apnz_max > lsize) {
        lsize = pnz + apnz_max;
        PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
      apnz       = *lnk; /* The first element in the list is the number of items in the list */
      api[i + 1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
    }

    /* add missing diagonal entry */
    if (C->force_diagonals) {
      j = i + rstart; /* column index */
      PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
    }

    apnz       = *lnk;
    api[i + 1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
    PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));

    current_space->array += apnz;
    current_space->local_used += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj and initialize it; destroy the list of free space and other temporary arrays */
  PetscCall(PetscMalloc1(api[am], &ptap->apj));
  apj = ptap->apj;
  PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
  PetscCall(PetscLLCondensedDestroy_Scalable(lnk));

  /* create and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  /* malloc apa for assembling C */
  PetscCall(PetscCalloc1(apnz_max, &ptap->apa));

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
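
/*
  The scalability difference between the two symbolic routines is visible in their
  work-array allocations (quoted from the two functions above):

.vb
  PetscCall(PetscCalloc1(pN, &ptap->apa));        (nonscalable: dense row of global width pN)
  PetscCall(PetscCalloc1(apnz_max, &ptap->apa));  (scalable: longest sparse row actually produced)
.ve
*/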

/* This function is needed for the seqMPI matrix-matrix multiplication.    */
/* Three sorted input arrays are merged into one sorted output array whose */
/* size is returned in size4. Duplicate entries only show up once.         */
static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
{
  PetscInt i = 0, j = 0, k = 0, l = 0;

  /* Traverse all three arrays */
  while (i < size1 && j < size2 && k < size3) {
    if (in1[i] < in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i++];
    } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
      out[l++] = in2[j++];
    } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
      out[l++] = in3[k++];
    } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i];
      i++, j++;
    } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
      out[l++] = in1[i];
      i++, k++;
    } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
      out[l++] = in2[j];
      k++, j++;
    } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
      out[l++] = in1[i];
      i++, j++, k++;
    }
  }

  /* Traverse two remaining arrays */
  while (i < size1 && j < size2) {
    if (in1[i] < in2[j]) {
      out[l++] = in1[i++];
    } else if (in1[i] > in2[j]) {
      out[l++] = in2[j++];
    } else {
      out[l++] = in1[i];
      i++, j++;
    }
  }

  while (i < size1 && k < size3) {
    if (in1[i] < in3[k]) {
      out[l++] = in1[i++];
    } else if (in1[i] > in3[k]) {
      out[l++] = in3[k++];
    } else {
      out[l++] = in1[i];
      i++, k++;
    }
  }

  while (k < size3 && j < size2) {
    if (in3[k] < in2[j]) {
      out[l++] = in3[k++];
    } else if (in3[k] > in2[j]) {
      out[l++] = in2[j++];
    } else {
      out[l++] = in3[k];
      k++, j++;
    }
  }

  /* Traverse one remaining array */
  while (i < size1) out[l++] = in1[i++];
  while (j < size2) out[l++] = in2[j++];
  while (k < size3) out[l++] = in3[k++];

  *size4 = l;
}
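
/*
  A small usage sketch of the merge (values chosen for illustration):

.vb
  PetscInt a[] = {0, 3}, b[] = {1, 3}, c[] = {3, 5}, out[6], n;
  Merge3SortedArrays(2, a, 2, b, 2, c, &n, out);
  (now out = {0, 1, 3, 5} and n = 4; the duplicate 3 appears once)
.ve
*/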

/* This matrix-matrix multiplication algorithm divides the multiplication into three      */
/* multiplications, C = A_diag*P_diag + A_diag*P_off + A_off*P_oth, and adds up the       */
/* products. Two of these three multiplications are performed with existing (sequential) */
/* matrix-matrix multiplications.  */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
{
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat_APMPI         *ptap;
  PetscFreeSpaceList free_space_diag = NULL, current_space = NULL;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc;
  Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
  Mat_SeqAIJ        *adpd_seq, *p_off, *aopoth_seq;
  PetscInt           adponz, adpdnz;
  PetscInt          *pi_loc, *dnz, *onz;
  PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
  PetscInt          *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
  PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
  PetscBT            lnkbt;
  PetscReal          afill;
  PetscMPIInt        rank;
  Mat                adpd, aopoth;
  MatType            mtype;
  const char        *prefix;

  PetscFunctionBegin;
  MatCheckProduct(C, 4);
  PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
  PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));

  /* create struct Mat_APMPI and attach it to C later */
  PetscCall(PetscNew(&ptap));

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));

  /* get P_loc by taking all local rows of P */
  PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));

  p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
  pi_loc = p_loc->i;

  /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
  PetscCall(PetscMalloc1(am + 1, &api));
  PetscCall(PetscMalloc1(am + 1, &adpoi));

  adpoi[0]  = 0;
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
  PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
  MatPreallocateBegin(comm, am, pn, dnz, onz);

  /* Symbolic calc of A_loc_diag * P_loc_diag */
  PetscCall(MatGetOptionsPrefix(A, &prefix));
  PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
  PetscCall(MatSetOptionsPrefix(adpd, prefix));
  PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));

  PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
  PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
  PetscCall(MatProductSetFill(adpd, fill));
  PetscCall(MatProductSetFromOptions(adpd));

  adpd->force_diagonals = C->force_diagonals;
  PetscCall(MatProductSymbolic(adpd));

  adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
  adpdi    = adpd_seq->i;
  adpdj    = adpd_seq->j;
  p_off    = (Mat_SeqAIJ *)p->B->data;
  poff_i   = p_off->i;
  poff_j   = p_off->j;

  /* j_temp stores indices of a result row before they are added to the linked list */
  PetscCall(PetscMalloc1(pN, &j_temp));

  /* Symbolic calc of the A_diag * P_loc_off */
  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
  current_space = free_space_diag;

  for (i = 0; i < am; i++) {
    /* A_diag * P_loc_off */
    nzi = adi[i + 1] - adi[i];
    for (j = 0; j < nzi; j++) {
      row  = *adj++;
      pnz  = poff_i[row + 1] - poff_i[row];
      Jptr = poff_j + poff_i[row];
      for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
      /* add non-zero cols of P into the sorted linked list lnk */
      PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
    }

    adponz       = lnk[0];
    adpoi[i + 1] = adpoi[i] + adponz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < adponz) {
      PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));

    current_space->array += adponz;
    current_space->local_used += adponz;
    current_space->local_remaining -= adponz;
  }

  /* Symbolic calc of A_off * P_oth */
  PetscCall(MatSetOptionsPrefix(a->B, prefix));
  PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
  PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
  PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
  aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
  aopothi    = aopoth_seq->i;
  aopothj    = aopoth_seq->j;

  /* Allocate space for apj, adpj, aopj, ...; destroy lists of free space and other temporary arrays */
  PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am], &ptap->apj));
  PetscCall(PetscMalloc1(adpoi[am], &adpoj));

  /* Copy from linked list to j-array */
  PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
  PetscCall(PetscLLDestroy(lnk, lnkbt));

  adpoJ = adpoj;
  adpdJ = adpdj;
  aopJ  = aopothj;
  apj   = ptap->apj;
  apJ   = apj; /* still empty */

  /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
  /* A_diag * P_loc_diag to get A*P */
  for (i = 0; i < am; i++) {
    aopnz  = aopothi[i + 1] - aopothi[i];
    adponz = adpoi[i + 1] - adpoi[i];
    adpdnz = adpdi[i + 1] - adpdi[i];

    /* Correct indices from A_diag*P_diag */
    for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
    /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
    Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
    PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));

    aopJ += aopnz;
    adpoJ += adponz;
    adpdJ += adpdnz;
    apJ += apnz;
    api[i + 1] = api[i] + apnz;
  }

  /* malloc apa to store dense row A[i,:]*P */
  PetscCall(PetscCalloc1(pN, &ptap->apa));

  /* create and assemble symbolic parallel matrix C */
  PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
  PetscCall(MatSetBlockSizesFromMats(C, A, P));
  PetscCall(MatGetType(A, &mtype));
  PetscCall(MatSetType(C, mtype));
  PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
  MatPreallocateEnd(dnz, onz);

  PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
  PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
  PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  C->ops->productnumeric = MatProductNumeric_AB;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

  /* set MatInfo */
  afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  C->info.mallocs           = nspacedouble;
  C->info.fill_ratio_given  = fill;
  C->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
    PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
  } else {
    PetscCall(PetscInfo(C, "Empty matrix product\n"));
  }
#endif

  PetscCall(MatDestroy(&aopoth));
  PetscCall(MatDestroy(&adpd));
  PetscCall(PetscFree(j_temp));
  PetscCall(PetscFree(adpoj));
  PetscCall(PetscFree(adpoi));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This routine only works when scall=MAT_REUSE_MATRIX! */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
{
  Mat_APMPI *ptap;
  Mat        Pt;

  PetscFunctionBegin;
  MatCheckProduct(C, 3);
  ptap = (Mat_APMPI *)C->product->data;
  PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
  PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");

  Pt = ptap->Pt;
  PetscCall(MatTransposeSetPrecursor(P, Pt));
  PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
  PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C));
  PetscFunctionReturn(PETSC_SUCCESS);
}
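
/*
  Reuse pattern shown above (sketch): a transpose updated with MAT_REUSE_MATRIX
  outside the call chain that created it must first declare its precursor:

.vb
  PetscCall(MatTransposeSetPrecursor(P, Pt));
  PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
.ve
*/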
1240 
1241 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
1242 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
1243 {
1244   Mat_APMPI               *ptap;
1245   Mat_MPIAIJ              *p = (Mat_MPIAIJ *)P->data;
1246   MPI_Comm                 comm;
1247   PetscMPIInt              size, rank;
1248   PetscFreeSpaceList       free_space = NULL, current_space = NULL;
1249   PetscInt                 pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
1250   PetscInt                *lnk, i, k, rstart;
1251   PetscBT                  lnkbt;
1252   PetscMPIInt              tagi, tagj, *len_si, *len_s, *len_ri, nrecv, proc, nsend;
1253   PETSC_UNUSED PetscMPIInt icompleted = 0;
1254   PetscInt               **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
1255   PetscInt                 len, *dnz, *onz, *owners, nzi;
1256   PetscInt                 nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1257   MPI_Request             *swaits, *rwaits;
1258   MPI_Status              *sstatus, rstatus;
1259   PetscLayout              rowmap;
1260   PetscInt                *owners_co, *coi, *coj; /* i and j array of (p->B)^T*A*P - used in the communication */
1261   PetscMPIInt             *len_r, *id_r;          /* array of length of comm->size, store send/recv matrix values */
1262   PetscInt                *Jptr, *prmap = p->garray, con, j, Crmax;
1263   Mat_SeqAIJ              *a_loc, *c_loc, *c_oth;
1264   PetscHMapI               ta;
1265   MatType                  mtype;
1266   const char              *prefix;
1267 
1268   PetscFunctionBegin;
1269   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1270   PetscCallMPI(MPI_Comm_size(comm, &size));
1271   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1272 
1273   /* create symbolic parallel matrix C */
1274   PetscCall(MatGetType(A, &mtype));
1275   PetscCall(MatSetType(C, mtype));
1276 
1277   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1278 
1279   /* create struct Mat_APMPI and attached it to C later */
1280   PetscCall(PetscNew(&ptap));
1281 
1282   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1283   PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
1284   PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));
1285 
1286   /* (1) compute symbolic A_loc */
1287   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));
1288 
1289   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1290   PetscCall(MatGetOptionsPrefix(A, &prefix));
1291   PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
1292   PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
1293   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
1294   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));
1295 
1296   /* (3) send coj of C_oth to other processors  */
1297   /* determine row ownership */
1298   PetscCall(PetscLayoutCreate(comm, &rowmap));
1299   rowmap->n  = pn;
1300   rowmap->bs = 1;
1301   PetscCall(PetscLayoutSetUp(rowmap));
1302   owners = rowmap->range;
1303 
1304   /* determine the number of messages to send, their lengths */
1305   PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 1, &owners_co));
1306   PetscCall(PetscArrayzero(len_s, size));
1307   PetscCall(PetscArrayzero(len_si, size));
1308 
1309   c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1310   coi   = c_oth->i;
1311   coj   = c_oth->j;
1312   con   = ptap->C_oth->rmap->n;
1313   proc  = 0;
1314   for (i = 0; i < con; i++) {
1315     while (prmap[i] >= owners[proc + 1]) proc++;
1316     len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
1317     len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1318   }
1319 
1320   len          = 0; /* total length of buf_s[], see (4) */
1321   owners_co[0] = 0;
1322   nsend        = 0;
1323   for (proc = 0; proc < size; proc++) {
1324     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1325     if (len_s[proc]) {
1326       nsend++;
1327       len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1328       len += len_si[proc];
1329     }
1330   }
1331 
1332   /* determine the number and length of messages to receive for coi and coj  */
1333   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
1334   PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));
1335 
1336   /* post the Irecv and Isend of coj */
1337   PetscCall(PetscCommGetNewTag(comm, &tagj));
1338   PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
1339   PetscCall(PetscMalloc1(nsend, &swaits));
1340   for (proc = 0, k = 0; proc < size; proc++) {
1341     if (!len_s[proc]) continue;
1342     i = owners_co[proc];
1343     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1344     k++;
1345   }
1346 
1347   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1348   PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
1349   PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
1350   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
1351   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
1352   c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;
1353 
1354   /* wait until the receives of coj are complete */
1355   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1356   PetscCall(PetscFree(rwaits));
1357   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1358 
1359   /* add received column indices into ta to update Crmax */
1360   a_loc = (Mat_SeqAIJ *)ptap->A_loc->data;
1361 
1362   /* create and initialize a hash table */
1363   PetscCall(PetscHMapICreateWithSize(an, &ta)); /* for computing Crmax */
1364   MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);
1365 
1366   for (k = 0; k < nrecv; k++) { /* k-th received message */
1367     Jptr = buf_rj[k];
1368     for (j = 0; j < len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1369   }
1370   PetscCall(PetscHMapIGetSize(ta, &Crmax));
1371   PetscCall(PetscHMapIDestroy(&ta));
1372 
1373   /* (4) send and recv coi */
1374   PetscCall(PetscCommGetNewTag(comm, &tagi));
1375   PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
1376   PetscCall(PetscMalloc1(len, &buf_s));
1377   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1378   for (proc = 0, k = 0; proc < size; proc++) {
1379     if (!len_s[proc]) continue;
1380     /* form outgoing message for i-structure:
1381          buf_si[0]:                 nrows to be sent
1382                [1:nrows]:           row index (global)
1383                [nrows+1:2*nrows+1]: i-structure index
1384     */
1385     nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1386     buf_si_i    = buf_si + nrows + 1;
1387     buf_si[0]   = nrows;
1388     buf_si_i[0] = 0;
1389     nrows       = 0;
1390     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1391       nzi                 = coi[i + 1] - coi[i];
1392       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1393       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1394       nrows++;
1395     }
1396     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1397     k++;
1398     buf_si += len_si[proc];
1399   }
1400   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1401   PetscCall(PetscFree(rwaits));
1402   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1403 
1404   PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
1405   PetscCall(PetscFree(len_ri));
1406   PetscCall(PetscFree(swaits));
1407   PetscCall(PetscFree(buf_s));
1408 
1409   /* (5) compute the local portion of C      */
1410   /* set initial free space to be Crmax, sufficient to hold the nonzeros of any single row of C */
1411   PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
1412   current_space = free_space;
1413 
1414   PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
1415   for (k = 0; k < nrecv; k++) {
1416     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1417     nrows       = *buf_ri_k[k];
1418     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1419     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1420   }
1421 
1422   MatPreallocateBegin(comm, pn, an, dnz, onz);
1423   PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
1424   for (i = 0; i < pn; i++) { /* for each local row of C */
1425     /* add C_loc into C */
1426     nzi  = c_loc->i[i + 1] - c_loc->i[i];
1427     Jptr = c_loc->j + c_loc->i[i];
1428     PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1429 
1430     /* add received col data into lnk */
1431     for (k = 0; k < nrecv; k++) { /* k-th received message */
1432       if (i == *nextrow[k]) {     /* i-th row */
1433         nzi  = *(nextci[k] + 1) - *nextci[k];
1434         Jptr = buf_rj[k] + *nextci[k];
1435         PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1436         nextrow[k]++;
1437         nextci[k]++;
1438       }
1439     }
1440 
1441     /* add missing diagonal entry */
1442     if (C->force_diagonals) {
1443       k = i + owners[rank]; /* column index */
1444       PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
1445     }
1446 
1447     nzi = lnk[0];
1448 
1449     /* copy data into free space, then initialize lnk */
1450     PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
1451     PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
1452   }
1453   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1454   PetscCall(PetscLLDestroy(lnk, lnkbt));
1455   PetscCall(PetscFreeSpaceDestroy(free_space));
1456 
1457   /* local sizes and preallocation */
1458   PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
1459   PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
1460   PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
1461   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1462   MatPreallocateEnd(dnz, onz);
1463 
1464   /* add C_loc and C_oth to C */
1465   PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
1466   for (i = 0; i < pn; i++) {
1467     ncols = c_loc->i[i + 1] - c_loc->i[i];
1468     cols  = c_loc->j + c_loc->i[i];
1469     row   = rstart + i;
1470     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1471 
1472     if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
1473   }
1474   for (i = 0; i < con; i++) {
1475     ncols = c_oth->i[i + 1] - c_oth->i[i];
1476     cols  = c_oth->j + c_oth->i[i];
1477     row   = prmap[i];
1478     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1479   }
1480   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1481   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1482   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1483 
1484   /* members in merge */
1485   PetscCall(PetscFree(id_r));
1486   PetscCall(PetscFree(len_r));
1487   PetscCall(PetscFree(buf_ri[0]));
1488   PetscCall(PetscFree(buf_ri));
1489   PetscCall(PetscFree(buf_rj[0]));
1490   PetscCall(PetscFree(buf_rj));
1491   PetscCall(PetscLayoutDestroy(&rowmap));
1492 
1493   /* attach the supporting struct to C for reuse */
1494   C->product->data    = ptap;
1495   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1496   PetscFunctionReturn(PETSC_SUCCESS);
1497 }
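
/*
   Usage sketch (illustrative only; MatTransposeMatMult() is the standard PETSc
   entry point): the symbolic routine above runs once for MAT_INITIAL_MATRIX,
   and only the numeric routine below reruns for MAT_REUSE_MATRIX, assuming the
   nonzero patterns of P and A are unchanged:

     Mat C;
     PetscCall(MatTransposeMatMult(P, A, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C));
     ... update values of A and/or P without changing their sparsity ...
     PetscCall(MatTransposeMatMult(P, A, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C));
     PetscCall(MatDestroy(&C));
*/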
1498 
1499 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1500 {
1501   Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
1502   Mat_SeqAIJ        *c_seq;
1503   Mat_APMPI         *ptap;
1504   Mat                A_loc, C_loc, C_oth;
1505   PetscInt           i, rstart, rend, cm, ncols, row;
1506   const PetscInt    *cols;
1507   const PetscScalar *vals;
1508 
1509   PetscFunctionBegin;
1510   MatCheckProduct(C, 3);
1511   ptap = (Mat_APMPI *)C->product->data;
1512   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1513   PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1514   PetscCall(MatZeroEntries(C));
1515 
1516   /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1517   /* 1) get R = Pd^T, Ro = Po^T */
1518   PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
1519   PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
1520   PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
1521   PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));
1522 
1523   /* 2) compute numeric A_loc */
1524   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));
1525 
1526   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1527   A_loc = ptap->A_loc;
1528   PetscCall(ptap->C_loc->ops->matmultnumeric(ptap->Rd, A_loc, ptap->C_loc));
1529   PetscCall(ptap->C_oth->ops->matmultnumeric(ptap->Ro, A_loc, ptap->C_oth));
1530   C_loc = ptap->C_loc;
1531   C_oth = ptap->C_oth;
1532 
1533   /* add C_loc and C_oth to C */
1534   PetscCall(MatGetOwnershipRange(C, &rstart, &rend));
1535 
1536   /* C_loc -> C */
1537   cm    = C_loc->rmap->N;
1538   c_seq = (Mat_SeqAIJ *)C_loc->data;
1539   cols  = c_seq->j;
1540   vals  = c_seq->a;
1541   for (i = 0; i < cm; i++) {
1542     ncols = c_seq->i[i + 1] - c_seq->i[i];
1543     row   = rstart + i;
1544     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1545     cols += ncols;
1546     vals += ncols;
1547   }
1548 
1549   /* Co -> C, off-processor part */
1550   cm    = C_oth->rmap->N;
1551   c_seq = (Mat_SeqAIJ *)C_oth->data;
1552   cols  = c_seq->j;
1553   vals  = c_seq->a;
1554   for (i = 0; i < cm; i++) {
1555     ncols = c_seq->i[i + 1] - c_seq->i[i];
1556     row   = p->garray[i];
1557     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1558     cols += ncols;
1559     vals += ncols;
1560   }
1561   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1562   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1563   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1564   PetscFunctionReturn(PETSC_SUCCESS);
1565 }
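
/*
   Note on the Co -> C scatter in the routine above: local row i of C_oth
   corresponds to the i-th off-process column of P, so its global destination
   row in C is p->garray[i]. For example (illustrative values), with
   p->garray = {2, 5, 9}, local row 1 of C_oth is accumulated into global row 5
   of C via ADD_VALUES.
*/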
1566 
1567 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1568 {
1569   Mat_Merge_SeqsToMPI *merge;
1570   Mat_MPIAIJ          *p  = (Mat_MPIAIJ *)P->data;
1571   Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data;
1572   Mat_APMPI           *ap;
1573   PetscInt            *adj;
1574   PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
1575   MatScalar           *ada, *ca, valtmp;
1576   PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1577   MPI_Comm             comm;
1578   PetscMPIInt          size, rank, taga, *len_s, proc;
1579   PetscInt            *owners, nrows, **buf_ri_k, **nextrow, **nextci;
1580   PetscInt           **buf_ri, **buf_rj;
1581   PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local arrays of C (mpi mat) */
1582   MPI_Request         *s_waits, *r_waits;
1583   MPI_Status          *status;
1584   MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
1585   const PetscScalar   *dummy;
1586   PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
1587   Mat                  A_loc;
1588   Mat_SeqAIJ          *a_loc;
1589 
1590   PetscFunctionBegin;
1591   MatCheckProduct(C, 3);
1592   ap = (Mat_APMPI *)C->product->data;
1593   PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1594   PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1595   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
1596   PetscCallMPI(MPI_Comm_size(comm, &size));
1597   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1598 
1599   merge = ap->merge;
1600 
1601   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1602   /* get data from symbolic products */
1603   coi = merge->coi;
1604   coj = merge->coj;
1605   PetscCall(PetscCalloc1(coi[pon], &coa));
1606   bi     = merge->bi;
1607   bj     = merge->bj;
1608   owners = merge->rowmap->range;
1609   PetscCall(PetscCalloc1(bi[cm], &ba));
1610 
1611   /* get A_loc by taking all local rows of A */
1612   A_loc = ap->A_loc;
1613   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
1614   a_loc = (Mat_SeqAIJ *)A_loc->data;
1615   ai    = a_loc->i;
1616   aj    = a_loc->j;
1617 
1618   /* trigger copy to CPU */
1619   PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
1620   PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
1621   PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
1622   PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
1623   for (i = 0; i < am; i++) {
1624     anz = ai[i + 1] - ai[i];
1625     adj = aj + ai[i];
1626     ada = a_loc->a + ai[i];
1627 
1628     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1629     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1630     pnz = po->i[i + 1] - po->i[i];
1631     poJ = po->j + po->i[i];
1632     pA  = po->a + po->i[i];
1633     for (j = 0; j < pnz; j++) {
1634       row = poJ[j];
1635       cj  = coj + coi[row];
1636       ca  = coa + coi[row];
1637       /* perform sparse axpy */
1638       nexta  = 0;
1639       valtmp = pA[j];
1640       for (k = 0; nexta < anz; k++) {
1641         if (cj[k] == adj[nexta]) {
1642           ca[k] += valtmp * ada[nexta];
1643           nexta++;
1644         }
1645       }
1646       PetscCall(PetscLogFlops(2.0 * anz));
1647     }
1648 
1649     /* put the value into Cd (diagonal part) */
1650     pnz = pd->i[i + 1] - pd->i[i];
1651     pdJ = pd->j + pd->i[i];
1652     pA  = pd->a + pd->i[i];
1653     for (j = 0; j < pnz; j++) {
1654       row = pdJ[j];
1655       cj  = bj + bi[row];
1656       ca  = ba + bi[row];
1657       /* perform sparse axpy */
1658       nexta  = 0;
1659       valtmp = pA[j];
1660       for (k = 0; nexta < anz; k++) {
1661         if (cj[k] == adj[nexta]) {
1662           ca[k] += valtmp * ada[nexta];
1663           nexta++;
1664         }
1665       }
1666       PetscCall(PetscLogFlops(2.0 * anz));
1667     }
1668   }
1669 
1670   /* 3) send and recv matrix values coa */
1671   buf_ri = merge->buf_ri;
1672   buf_rj = merge->buf_rj;
1673   len_s  = merge->len_s;
1674   PetscCall(PetscCommGetNewTag(comm, &taga));
1675   PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
1676 
1677   PetscCall(PetscMalloc2(merge->nsend, &s_waits, size, &status));
1678   for (proc = 0, k = 0; proc < size; proc++) {
1679     if (!len_s[proc]) continue;
1680     i = merge->owners_co[proc];
1681     PetscCallMPI(MPIU_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
1682     k++;
1683   }
1684   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
1685   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
1686 
1687   PetscCall(PetscFree2(s_waits, status));
1688   PetscCall(PetscFree(r_waits));
1689   PetscCall(PetscFree(coa));
1690 
1691   /* 4) insert local Cseq and received values into Cmpi */
1692   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1693   for (k = 0; k < merge->nrecv; k++) {
1694     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1695     nrows       = *buf_ri_k[k];
1696     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1697     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1698   }
1699 
1700   for (i = 0; i < cm; i++) {
1701     row  = owners[rank] + i; /* global row index of C_seq */
1702     bj_i = bj + bi[i];       /* col indices of the i-th row of C */
1703     ba_i = ba + bi[i];
1704     bnz  = bi[i + 1] - bi[i];
1705     /* add received vals into ba */
1706     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1707       /* i-th row */
1708       if (i == *nextrow[k]) {
1709         cnz    = *(nextci[k] + 1) - *nextci[k];
1710         cj     = buf_rj[k] + *nextci[k];
1711         ca     = abuf_r[k] + *nextci[k];
1712         nextcj = 0;
1713         for (j = 0; nextcj < cnz; j++) {
1714           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1715             ba_i[j] += ca[nextcj++];
1716           }
1717         }
1718         nextrow[k]++;
1719         nextci[k]++;
1720         PetscCall(PetscLogFlops(2.0 * cnz));
1721       }
1722     }
1723     PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
1724   }
1725   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1726   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1727 
1728   PetscCall(PetscFree(ba));
1729   PetscCall(PetscFree(abuf_r[0]));
1730   PetscCall(PetscFree(abuf_r));
1731   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1732   PetscFunctionReturn(PETSC_SUCCESS);
1733 }
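
/*
   Both sparse axpy loops in the routine above rely on an invariant established
   by the symbolic phase: each target row pattern cj is a sorted superset of
   every contributing row pattern adj, so a single forward scan merges the two
   sorted index lists (a minimal sketch with the same variable names):

     nexta = 0;
     for (k = 0; nexta < anz; k++) {
       if (cj[k] == adj[nexta]) ca[k] += valtmp * ada[nexta++];
     }

   Termination is guaranteed because every entry of adj occurs in cj.
*/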
1734 
1735 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1736 {
1737   Mat                  A_loc;
1738   Mat_APMPI           *ap;
1739   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
1740   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1741   PetscInt            *pdti, *pdtj, *poti, *potj, *ptJ;
1742   PetscInt             nnz;
1743   PetscInt            *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1744   PetscInt             am = A->rmap->n, pn = P->cmap->n;
1745   MPI_Comm             comm;
1746   PetscMPIInt          size, rank, tagi, tagj, *len_si, *len_s, *len_ri, proc;
1747   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
1748   PetscInt             len, *dnz, *onz, *owners;
1749   PetscInt             nzi, *bi, *bj;
1750   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1751   MPI_Request         *swaits, *rwaits;
1752   MPI_Status          *sstatus, rstatus;
1753   Mat_Merge_SeqsToMPI *merge;
1754   PetscInt            *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1755   PetscReal            afill  = 1.0, afill_tmp;
1756   PetscInt             rstart = P->cmap->rstart, rmax, Armax;
1757   Mat_SeqAIJ          *a_loc;
1758   PetscHMapI           ta;
1759   MatType              mtype;
1760 
1761   PetscFunctionBegin;
1762   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1763   /* check if matrix local sizes are compatible */
1764   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart,
1765              A->rmap->rend, P->rmap->rstart, P->rmap->rend);
1766 
1767   PetscCallMPI(MPI_Comm_size(comm, &size));
1768   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1769 
1770   /* create struct Mat_APMPI and attach it to C later */
1771   PetscCall(PetscNew(&ap));
1772 
1773   /* get A_loc by taking all local rows of A */
1774   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));
1775 
1776   ap->A_loc = A_loc;
1777   a_loc     = (Mat_SeqAIJ *)A_loc->data;
1778   ai        = a_loc->i;
1779   aj        = a_loc->j;
1780 
1781   /* determine symbolic Co=(p->B)^T*A - send to others */
1782   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
1783   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
1784   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1785                          >= (num of nonzero rows of C_seq) - pn */
1786   PetscCall(PetscMalloc1(pon + 1, &coi));
1787   coi[0] = 0;
1788 
1789   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1790   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1791   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1792   current_space = free_space;
1793 
1794   /* create and initialize a hash table for computing Armax */
1795   PetscCall(PetscHMapICreateWithSize(A->cmap->n + a->B->cmap->N, &ta));
1796   MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1797   PetscCall(PetscHMapIGetSize(ta, &Armax));
1798 
1799   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1800 
1801   for (i = 0; i < pon; i++) {
1802     pnz = poti[i + 1] - poti[i];
1803     ptJ = potj + poti[i];
1804     for (j = 0; j < pnz; j++) {
1805       row  = ptJ[j]; /* row of A_loc == col of Pot */
1806       anz  = ai[row + 1] - ai[row];
1807       Jptr = aj + ai[row];
1808       /* add non-zero cols of AP into the sorted linked list lnk */
1809       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1810     }
1811     nnz = lnk[0];
1812 
1813     /* If free space is not available, double the total space in the list */
1814     if (current_space->local_remaining < nnz) {
1815       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1816       nspacedouble++;
1817     }
1818 
1819     /* copy data into free space, then initialize lnk */
1820     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
1821 
1822     current_space->array += nnz;
1823     current_space->local_used += nnz;
1824     current_space->local_remaining -= nnz;
1825 
1826     coi[i + 1] = coi[i] + nnz;
1827   }
1828 
1829   PetscCall(PetscMalloc1(coi[pon], &coj));
1830   PetscCall(PetscFreeSpaceContiguous(&free_space, coj));
1831   PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */
1832 
1833   afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1834   if (afill_tmp > afill) afill = afill_tmp;
1835 
1836   /* send j-array (coj) of Co to other processors */
1837   /* determine row ownership */
1838   PetscCall(PetscNew(&merge));
1839   PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
1840 
1841   merge->rowmap->n  = pn;
1842   merge->rowmap->bs = 1;
1843 
1844   PetscCall(PetscLayoutSetUp(merge->rowmap));
1845   owners = merge->rowmap->range;
1846 
1847   /* determine the number of messages to send, their lengths */
1848   PetscCall(PetscCalloc1(size, &len_si));
1849   PetscCall(PetscCalloc1(size, &merge->len_s));
1850 
1851   len_s        = merge->len_s;
1852   merge->nsend = 0;
1853 
1854   PetscCall(PetscMalloc1(size + 1, &owners_co));
1855 
1856   proc = 0;
1857   for (i = 0; i < pon; i++) {
1858     while (prmap[i] >= owners[proc + 1]) proc++;
1859     len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1860     len_s[proc] += coi[i + 1] - coi[i];
1861   }
1862 
1863   len          = 0; /* total length of buf_s[] */
1864   owners_co[0] = 0;
1865   for (proc = 0; proc < size; proc++) {
1866     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1867     if (len_s[proc]) {
1868       merge->nsend++;
1869       len_si[proc] = 2 * (len_si[proc] + 1);
1870       len += len_si[proc];
1871     }
1872   }
1873 
1874   /* determine the number and length of messages to receive for coi and coj  */
1875   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
1876   PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
1877 
1878   /* post the Irecv and Isend of coj */
1879   PetscCall(PetscCommGetNewTag(comm, &tagj));
1880   PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits));
1881   PetscCall(PetscMalloc1(merge->nsend, &swaits));
1882   for (proc = 0, k = 0; proc < size; proc++) {
1883     if (!len_s[proc]) continue;
1884     i = owners_co[proc];
1885     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1886     k++;
1887   }
1888 
1889   /* wait until the receives and sends of coj are complete */
1890   PetscCall(PetscMalloc1(size, &sstatus));
1891   for (i = 0; i < merge->nrecv; i++) {
1892     PETSC_UNUSED PetscMPIInt icompleted;
1893     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1894   }
1895   PetscCall(PetscFree(rwaits));
1896   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1897 
1898   /* add received column indices into table to update Armax */
1899   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1900   for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1901     Jptr = buf_rj[k];
1902     for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1903   }
1904   PetscCall(PetscHMapIGetSize(ta, &Armax));
1905 
1906   /* send and recv coi */
1907   PetscCall(PetscCommGetNewTag(comm, &tagi));
1908   PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits));
1909   PetscCall(PetscMalloc1(len, &buf_s));
1910   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1911   for (proc = 0, k = 0; proc < size; proc++) {
1912     if (!len_s[proc]) continue;
1913     /* form outgoing message for i-structure:
1914          buf_si[0]:                 nrows to be sent
1915                [1:nrows]:           row index (global)
1916                [nrows+1:2*nrows+1]: i-structure index
1917     */
1918     nrows       = len_si[proc] / 2 - 1;
1919     buf_si_i    = buf_si + nrows + 1;
1920     buf_si[0]   = nrows;
1921     buf_si_i[0] = 0;
1922     nrows       = 0;
1923     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1924       nzi                 = coi[i + 1] - coi[i];
1925       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1926       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1927       nrows++;
1928     }
1929     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1930     k++;
1931     buf_si += len_si[proc];
1932   }
1933   i = merge->nrecv;
1934   while (i--) {
1935     PETSC_UNUSED PetscMPIInt icompleted;
1936     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1937   }
1938   PetscCall(PetscFree(rwaits));
1939   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1940   PetscCall(PetscFree(len_si));
1941   PetscCall(PetscFree(len_ri));
1942   PetscCall(PetscFree(swaits));
1943   PetscCall(PetscFree(sstatus));
1944   PetscCall(PetscFree(buf_s));
1945 
1946   /* compute the local portion of C (mpi mat) */
1947   /* allocate bi array and free space for accumulating nonzero column info */
1948   PetscCall(PetscMalloc1(pn + 1, &bi));
1949   bi[0] = 0;
1950 
1951   /* set initial free space to be fill*(nnz(P) + nnz(A_loc)) */
1952   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1953   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1954   current_space = free_space;
1955 
1956   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1957   for (k = 0; k < merge->nrecv; k++) {
1958     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1959     nrows       = *buf_ri_k[k];
1960     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1961     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1962   }
1963 
1964   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1965   MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1966   rmax = 0;
1967   for (i = 0; i < pn; i++) {
1968     /* add pdt[i,:]*AP into lnk */
1969     pnz = pdti[i + 1] - pdti[i];
1970     ptJ = pdtj + pdti[i];
1971     for (j = 0; j < pnz; j++) {
1972       row  = ptJ[j]; /* row of AP == col of Pt */
1973       anz  = ai[row + 1] - ai[row];
1974       Jptr = aj + ai[row];
1975       /* add non-zero cols of AP into the sorted linked list lnk */
1976       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1977     }
1978 
1979     /* add received col data into lnk */
1980     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1981       if (i == *nextrow[k]) {            /* i-th row */
1982         nzi  = *(nextci[k] + 1) - *nextci[k];
1983         Jptr = buf_rj[k] + *nextci[k];
1984         PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk));
1985         nextrow[k]++;
1986         nextci[k]++;
1987       }
1988     }
1989 
1990     /* add missing diagonal entry */
1991     if (C->force_diagonals) {
1992       k = i + owners[rank]; /* column index */
1993       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk));
1994     }
1995 
1996     nnz = lnk[0];
1997 
1998     /* if free space is not available, make more free space */
1999     if (current_space->local_remaining < nnz) {
2000       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
2001       nspacedouble++;
2002     }
2003     /* copy data into free space, then initialize lnk */
2004     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
2005     PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz));
2006 
2007     current_space->array += nnz;
2008     current_space->local_used += nnz;
2009     current_space->local_remaining -= nnz;
2010 
2011     bi[i + 1] = bi[i] + nnz;
2012     if (nnz > rmax) rmax = nnz;
2013   }
2014   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
2015 
2016   PetscCall(PetscMalloc1(bi[pn], &bj));
2017   PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
2018   afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2019   if (afill_tmp > afill) afill = afill_tmp;
2020   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2021   PetscCall(PetscHMapIDestroy(&ta));
2022   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
2023   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
2024 
2025   /* create symbolic parallel matrix C - why can it not be assembled in the numeric part? */
2026   PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE));
2027   PetscCall(MatSetBlockSizes(C, P->cmap->bs, A->cmap->bs));
2028   PetscCall(MatGetType(A, &mtype));
2029   PetscCall(MatSetType(C, mtype));
2030   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
2031   MatPreallocateEnd(dnz, onz);
2032   PetscCall(MatSetBlockSize(C, 1));
2033   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
2034   for (i = 0; i < pn; i++) {
2035     row  = i + rstart;
2036     nnz  = bi[i + 1] - bi[i];
2037     Jptr = bj + bi[i];
2038     PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES));
2039   }
2040   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
2041   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
2042   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
2043   merge->bi        = bi;
2044   merge->bj        = bj;
2045   merge->coi       = coi;
2046   merge->coj       = coj;
2047   merge->buf_ri    = buf_ri;
2048   merge->buf_rj    = buf_rj;
2049   merge->owners_co = owners_co;
2050 
2051   /* attach the supporting struct to C for reuse */
2052   C->product->data    = ap;
2053   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2054   ap->merge           = merge;
2055 
2056   C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2057 
2058 #if defined(PETSC_USE_INFO)
2059   if (bi[pn] != 0) {
2060     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
2061     PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
2062   } else {
2063     PetscCall(PetscInfo(C, "Empty matrix product\n"));
2064   }
2065 #endif
2066   PetscFunctionReturn(PETSC_SUCCESS);
2067 }
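
/*
   The fill ratio logged above is afill = nnz(C) / (nnz(Pd^T) + nnz(Po^T) + nnz(A_loc) + 1),
   maximized with the corresponding ratio for Co. As a worked example with
   illustrative numbers: nnz(C) = 12000 over a denominator of 6000 gives
   afill = 2.0, so passing fill >= 2.0 to a later symbolic call avoids the
   nspacedouble reallocations of the free-space buffers.
*/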
2068 
2069 static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2070 {
2071   Mat_Product *product = C->product;
2072   Mat          A = product->A, B = product->B;
2073   PetscReal    fill = product->fill;
2074   PetscBool    flg;
2075 
2076   PetscFunctionBegin;
2077   /* scalable */
2078   PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
2079   if (flg) {
2080     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
2081     goto next;
2082   }
2083 
2084   /* nonscalable */
2085   PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
2086   if (flg) {
2087     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
2088     goto next;
2089   }
2090 
2091   /* matmatmult */
2092   PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
2093   if (flg) {
2094     Mat        At;
2095     Mat_APMPI *ptap;
2096 
2097     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
2098     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
2099     ptap = (Mat_APMPI *)C->product->data;
2100     if (ptap) {
2101       ptap->Pt            = At;
2102       C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2103     }
2104     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2105     goto next;
2106   }
2107 
2108   /* backend general code */
2109   PetscCall(PetscStrcmp(product->alg, "backend", &flg));
2110   if (flg) {
2111     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2112     PetscFunctionReturn(PETSC_SUCCESS);
2113   }
2114 
2115   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct type is not supported");
2116 
2117 next:
2118   C->ops->productnumeric = MatProductNumeric_AtB;
2119   PetscFunctionReturn(PETSC_SUCCESS);
2120 }
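
/*
   The algorithm branches above map one-to-one onto the runtime options
   registered in MatProductSetFromOptions_MPIAIJ_AtB() below, e.g. (sketch):

     -mat_product_algorithm scalable      (MatProduct API)
     -mattransposematmult_via at*b        (MatTransposeMatMult() entry point)
*/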
2121 
2122 /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2123 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2124 {
2125   Mat_Product *product = C->product;
2126   Mat          A = product->A, B = product->B;
2127 #if defined(PETSC_HAVE_HYPRE)
2128   const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2129   PetscInt    nalg        = 5;
2130 #else
2131   const char *algTypes[4] = {"scalable", "nonscalable", "seqmpi", "backend"};
2132   PetscInt    nalg        = 4;
2138 #endif
2139   PetscInt  alg = 1; /* set nonscalable algorithm as default */
2140   PetscBool flg;
2141   MPI_Comm  comm;
2142 
2143   PetscFunctionBegin;
2144   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2145 
2146   /* Set "nonscalable" as default algorithm */
2147   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2148   if (flg) {
2149     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2150 
2151     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2152     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2153       MatInfo   Ainfo, Binfo;
2154       PetscInt  nz_local;
2155       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2156 
2157       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2158       PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2159       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2160 
2161       if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2162       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2163 
2164       if (alg_scalable) {
2165         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2166         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2167         PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2168       }
2169     }
2170   }
2171 
2172   /* Get runtime option */
2173   if (product->api_user) {
2174     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2175     PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2176     PetscOptionsEnd();
2177   } else {
2178     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2179     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2180     PetscOptionsEnd();
2181   }
2182   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2183 
2184   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2185   PetscFunctionReturn(PETSC_SUCCESS);
2186 }
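
/*
   Worked example of the default-switch heuristic above (illustrative numbers):
   with product->fill = 2.0 and nz_local = Ainfo.nz_allocated + Binfo.nz_allocated
   = 40000, a rank sets alg_scalable_loc once B->cmap->N > 80000; the MPI_LOR
   reduction then moves every rank to "scalable" if any rank crossed the
   threshold, keeping the choice collective across the communicator.
*/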
2187 
2188 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2189 {
2190   PetscFunctionBegin;
2191   PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2192   C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2193   PetscFunctionReturn(PETSC_SUCCESS);
2194 }
2195 
2196 /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2197 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2198 {
2199   Mat_Product *product = C->product;
2200   Mat          A = product->A, B = product->B;
2201   const char  *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2202   PetscInt     nalg        = 4;
2203   PetscInt     alg         = 1; /* set default algorithm */
2204   PetscBool    flg;
2205   MPI_Comm     comm;
2206 
2207   PetscFunctionBegin;
2208   /* Check matrix local sizes */
2209   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2210   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2211              A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
2212 
2213   /* Set default algorithm */
2214   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2215   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2216 
2217   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2218   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2219     MatInfo   Ainfo, Binfo;
2220     PetscInt  nz_local;
2221     PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2222 
2223     PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2224     PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2225     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2226 
2227     if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2228     PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2229 
2230     if (alg_scalable) {
2231       alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2232       PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2233       PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2234     }
2235   }
2236 
2237   /* Get runtime option */
2238   if (product->api_user) {
2239     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2240     PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2241     PetscOptionsEnd();
2242   } else {
2243     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2244     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2245     PetscOptionsEnd();
2246   }
2247   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2248 
2249   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2250   PetscFunctionReturn(PETSC_SUCCESS);
2251 }
2252 
2253 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2254 {
2255   Mat_Product *product = C->product;
2256   Mat          A = product->A, P = product->B;
2257   MPI_Comm     comm;
2258   PetscBool    flg;
2259   PetscInt     alg = 1; /* set default algorithm */
2260 #if !defined(PETSC_HAVE_HYPRE)
2261   const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2262   PetscInt    nalg        = 5;
2263 #else
2264   const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2265   PetscInt    nalg        = 6;
2266 #endif
2267   PetscInt pN = P->cmap->N;
2268 
2269   PetscFunctionBegin;
2270   /* Check matrix local sizes */
2271   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2272   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2273              A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2274   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2275              A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);
2276 
2277   /* Set "nonscalable" as default algorithm */
2278   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2279   if (flg) {
2280     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2281 
2282     /* Set "scalable" as default if PN and the local nonzeros of A and P are large */
2283     if (pN > 100000) {
2284       MatInfo   Ainfo, Pinfo;
2285       PetscInt  nz_local;
2286       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2287 
2288       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2289       PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
2290       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2291 
2292       if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2293       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPI_C_BOOL, MPI_LOR, comm));
2294 
2295       if (alg_scalable) {
2296         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2297         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2298       }
2299     }
2300   }
2301 
2302   /* Get runtime option */
2303   if (product->api_user) {
2304     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2305     PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2306     PetscOptionsEnd();
2307   } else {
2308     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2309     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2310     PetscOptionsEnd();
2311   }
2312   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2313 
2314   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2315   PetscFunctionReturn(PETSC_SUCCESS);
2316 }
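
/*
   Usage sketch (MatPtAP() is the standard PETSc entry point; values are
   illustrative): the algorithms above are selected per product, e.g.

     Mat C;
     PetscCall(MatPtAP(A, P, MAT_INITIAL_MATRIX, 2.0, &C));

   optionally combined with a runtime option such as -matptap_via allatonce
   or -mat_product_algorithm allatonce_merged.
*/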
2317 
2318 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2319 {
2320   Mat_Product *product = C->product;
2321   Mat          A = product->A, R = product->B;
2322 
2323   PetscFunctionBegin;
2324   /* Check matrix local sizes */
2325   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n,
2326              A->cmap->n, R->rmap->n, R->cmap->n);
2327 
2328   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2329   PetscFunctionReturn(PETSC_SUCCESS);
2330 }
2331 
2332 /*
2333  Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from the AB algorithms
2334 */
2335 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2336 {
2337   Mat_Product *product     = C->product;
2338   PetscBool    flg         = PETSC_FALSE;
2339   PetscInt     alg         = 1; /* default algorithm */
2340   const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2341   PetscInt     nalg        = 3;
2342 
2343   PetscFunctionBegin;
2344   /* Set default algorithm */
2345   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2346   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2347 
2348   /* Get runtime option */
2349   if (product->api_user) {
2350     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2351     PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2352     PetscOptionsEnd();
2353   } else {
2354     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2355     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
2356     PetscOptionsEnd();
2357   }
2358   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2359 
2360   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2361   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2362   PetscFunctionReturn(PETSC_SUCCESS);
2363 }
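
/*
   Usage sketch (MatMatMatMult() is the standard PETSc entry point): ABC
   products reach the symbolic routine set above through

     Mat D;
     PetscCall(MatMatMatMult(A, B, C, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &D));

   with the algorithm restricted to the AB choices listed above, e.g.
   -matmatmatmult_via scalable.
*/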
2364 
2365 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2366 {
2367   Mat_Product *product = C->product;
2368 
2369   PetscFunctionBegin;
2370   switch (product->type) {
2371   case MATPRODUCT_AB:
2372     PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2373     break;
2374   case MATPRODUCT_ABt:
2375     PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
2376     break;
2377   case MATPRODUCT_AtB:
2378     PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2379     break;
2380   case MATPRODUCT_PtAP:
2381     PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2382     break;
2383   case MATPRODUCT_RARt:
2384     PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2385     break;
2386   case MATPRODUCT_ABC:
2387     PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2388     break;
2389   default:
2390     break;
2391   }
2392   PetscFunctionReturn(PETSC_SUCCESS);
2393 }
2394
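/*
   A hedged end-to-end sketch of reaching the dispatch routine above through
   the MatProduct API (all calls are standard PETSc; A and B are assumed to be
   assembled MPIAIJ matrices):

     Mat C;
     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AtB));
     PetscCall(MatProductSetAlgorithm(C, "scalable"));
     PetscCall(MatProductSetFromOptions(C));    dispatches via MatProductSetFromOptions_MPIAIJ()
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
     PetscCall(MatDestroy(&C));
*/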