1 /*
2   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
3           C = A * B
4 */
5 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
6 #include <../src/mat/utils/freespace.h>
7 #include <../src/mat/impls/aij/mpi/mpiaij.h>
8 #include <petscbt.h>
9 #include <../src/mat/impls/dense/mpi/mpidense.h>
10 #include <petsc/private/vecimpl.h>
11 #include <petsc/private/sfimpl.h>
12 
13 #if defined(PETSC_HAVE_HYPRE)
14 PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
15 #endif
16 
17 PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
18 {
19   Mat_Product *product = C->product;
20   Mat          B       = product->B;
21 
22   PetscFunctionBegin;
23   PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &product->B));
24   PetscCall(MatDestroy(&B));
25   PetscCall(MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C));
26   PetscFunctionReturn(PETSC_SUCCESS);
27 }
28 
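/*
   A rough sketch (illustrative only, not part of this file's code) of the user-level MatProduct
   calls that typically reach the symbolic dispatcher below; the algorithm strings match the ones
   tested in MatProductSymbolic_AB_MPIAIJ_MPIAIJ():

     Mat C;
     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AB));
     PetscCall(MatProductSetAlgorithm(C, "scalable"));   (or "nonscalable", "seqmpi", "backend", "hypre")
     PetscCall(MatProductSetFill(C, PETSC_DETERMINE));
     PetscCall(MatProductSetFromOptions(C));
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
*/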
29 PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
30 {
31   Mat_Product        *product = C->product;
32   Mat                 A = product->A, B = product->B;
33   MatProductAlgorithm alg  = product->alg;
34   PetscReal           fill = product->fill;
35   PetscBool           flg;
36 
37   PetscFunctionBegin;
38   /* scalable */
39   PetscCall(PetscStrcmp(alg, "scalable", &flg));
40   if (flg) {
41     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
42     PetscFunctionReturn(PETSC_SUCCESS);
43   }
44 
45   /* nonscalable */
46   PetscCall(PetscStrcmp(alg, "nonscalable", &flg));
47   if (flg) {
48     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
49     PetscFunctionReturn(PETSC_SUCCESS);
50   }
51 
52   /* seqmpi */
53   PetscCall(PetscStrcmp(alg, "seqmpi", &flg));
54   if (flg) {
55     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C));
56     PetscFunctionReturn(PETSC_SUCCESS);
57   }
58 
59   /* backend general code */
60   PetscCall(PetscStrcmp(alg, "backend", &flg));
61   if (flg) {
62     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
63     PetscFunctionReturn(PETSC_SUCCESS);
64   }
65 
66 #if defined(PETSC_HAVE_HYPRE)
67   PetscCall(PetscStrcmp(alg, "hypre", &flg));
68   if (flg) {
69     PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C));
70     PetscFunctionReturn(PETSC_SUCCESS);
71   }
72 #endif
73   SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported");
74 }
75 
76 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
77 {
78   Mat_APMPI *ptap = (Mat_APMPI *)data;
79 
80   PetscFunctionBegin;
81   PetscCall(PetscFree2(ptap->startsj_s, ptap->startsj_r));
82   PetscCall(PetscFree(ptap->bufa));
83   PetscCall(MatDestroy(&ptap->P_loc));
84   PetscCall(MatDestroy(&ptap->P_oth));
85   PetscCall(MatDestroy(&ptap->Pt));
86   PetscCall(PetscFree(ptap->api));
87   PetscCall(PetscFree(ptap->apj));
88   PetscCall(PetscFree(ptap->apa));
89   PetscCall(PetscFree(ptap));
90   PetscFunctionReturn(PETSC_SUCCESS);
91 }
92 
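/*
   Numeric phase of the "nonscalable" algorithm, in brief: for each local row i, AProw_nonscalable()
   accumulates the dense row AP[i,:] = Ad[i,:]*P_loc + Ao[i,:]*P_oth into the work array apa (length
   pN, allocated in the symbolic phase), and the loop below then copies those values into the
   diagonal (cd) and off-diagonal (co) CSR blocks of C, zeroing each visited entry of apa so the
   array is clean for the next row.
*/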
93 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
94 {
95   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
96   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
97   Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
98   PetscScalar       *cda = cd->a, *coa = co->a;
99   Mat_SeqAIJ        *p_loc, *p_oth;
100   PetscScalar       *apa, *ca;
101   PetscInt           cm = C->rmap->n;
102   Mat_APMPI         *ptap;
103   PetscInt          *api, *apj, *apJ, i, k;
104   PetscInt           cstart = C->cmap->rstart;
105   PetscInt           cdnz, conz, k0, k1;
106   const PetscScalar *dummy;
107   MPI_Comm           comm;
108   PetscMPIInt        size;
109 
110   PetscFunctionBegin;
111   MatCheckProduct(C, 3);
112   ptap = (Mat_APMPI *)C->product->data;
113   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
114   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
115   PetscCallMPI(MPI_Comm_size(comm, &size));
116   PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");
117 
118   /* flag CPU mask for C */
119 #if defined(PETSC_HAVE_DEVICE)
120   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
121   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
122   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
123 #endif
124 
125   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
126   /* update numerical values of P_oth and P_loc */
127   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
128   PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));
129 
130   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
131   /* get data from symbolic products */
132   p_loc = (Mat_SeqAIJ *)ptap->P_loc->data;
133   p_oth = NULL;
134   if (size > 1) p_oth = (Mat_SeqAIJ *)ptap->P_oth->data;
135 
136   /* get apa for storing dense row A[i,:]*P */
137   apa = ptap->apa;
138 
139   api = ptap->api;
140   apj = ptap->apj;
141   /* trigger copy to CPU */
142   PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
143   PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
144   PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
145   PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
146   for (i = 0; i < cm; i++) {
147     /* compute apa = A[i,:]*P */
148     AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);
149 
150     /* set values in C */
151     apJ  = apj + api[i];
152     cdnz = cd->i[i + 1] - cd->i[i];
153     conz = co->i[i + 1] - co->i[i];
154 
155     /* 1st off-diagonal part of C */
156     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
157     k  = 0;
158     for (k0 = 0; k0 < conz; k0++) {
159       if (apJ[k] >= cstart) break;
160       ca[k0]        = apa[apJ[k]];
161       apa[apJ[k++]] = 0.0;
162     }
163 
164     /* diagonal part of C */
165     ca = PetscSafePointerPlusOffset(cda, cd->i[i]);
166     for (k1 = 0; k1 < cdnz; k1++) {
167       ca[k1]        = apa[apJ[k]];
168       apa[apJ[k++]] = 0.0;
169     }
170 
171     /* 2nd off-diagonal part of C */
172     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
173     for (; k0 < conz; k0++) {
174       ca[k0]        = apa[apJ[k]];
175       apa[apJ[k++]] = 0.0;
176     }
177   }
178   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
179   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
180   PetscFunctionReturn(PETSC_SUCCESS);
181 }
182 
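/*
   Symbolic phase of the "nonscalable" algorithm: only the nonzero pattern of AP = A_loc*P is built.
   For each local row of A, the column indices of the required rows of P_loc and P_oth are merged
   into a sorted condensed linked list (PetscLLCondensed*), producing a CSR-like pattern api/apj.
   As a purely illustrative example, if row 0 of AP has columns {1,4,7} and row 1 has columns {0,4},
   then api = {0,3,5} and apj = {1,4,7,0,4}. The indices are staged in FreeSpace blocks (doubled when
   exhausted, counted in nspacedouble) until the total size api[am] is known.
*/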
183 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
184 {
185   MPI_Comm           comm;
186   PetscMPIInt        size;
187   Mat_APMPI         *ptap;
188   PetscFreeSpaceList free_space = NULL, current_space = NULL;
189   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
190   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
191   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
192   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
193   PetscInt          *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
194   PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
195   PetscBT            lnkbt;
196   PetscReal          afill;
197   MatType            mtype;
198 
199   PetscFunctionBegin;
200   MatCheckProduct(C, 4);
201   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
202   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
203   PetscCallMPI(MPI_Comm_size(comm, &size));
204 
205   /* create struct Mat_APMPI and attach it to C later */
206   PetscCall(PetscNew(&ptap));
207 
208   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
209   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
210 
211   /* get P_loc by taking all local rows of P */
212   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
213 
214   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
215   pi_loc = p_loc->i;
216   pj_loc = p_loc->j;
217   if (size > 1) {
218     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
219     pi_oth = p_oth->i;
220     pj_oth = p_oth->j;
221   } else {
222     p_oth  = NULL;
223     pi_oth = NULL;
224     pj_oth = NULL;
225   }
226 
227   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
228   PetscCall(PetscMalloc1(am + 2, &api));
229   ptap->api = api;
230   api[0]    = 0;
231 
232   /* create and initialize a linked list */
233   PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
234 
235   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
236   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
237   current_space = free_space;
238 
239   MatPreallocateBegin(comm, am, pn, dnz, onz);
240   for (i = 0; i < am; i++) {
241     /* diagonal portion of A */
242     nzi = adi[i + 1] - adi[i];
243     for (j = 0; j < nzi; j++) {
244       row  = *adj++;
245       pnz  = pi_loc[row + 1] - pi_loc[row];
246       Jptr = pj_loc + pi_loc[row];
247       /* add non-zero cols of P into the sorted linked list lnk */
248       PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
249     }
250     /* off-diagonal portion of A */
251     nzi = aoi[i + 1] - aoi[i];
252     for (j = 0; j < nzi; j++) {
253       row  = *aoj++;
254       pnz  = pi_oth[row + 1] - pi_oth[row];
255       Jptr = pj_oth + pi_oth[row];
256       PetscCall(PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt));
257     }
258     /* add possible missing diagonal entry */
259     if (C->force_diagonals) {
260       j = i + rstart; /* column index */
261       PetscCall(PetscLLCondensedAddSorted(1, &j, lnk, lnkbt));
262     }
263 
264     apnz       = lnk[0];
265     api[i + 1] = api[i] + apnz;
266 
267     /* if free space is not available, double the total space in the list */
268     if (current_space->local_remaining < apnz) {
269       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
270       nspacedouble++;
271     }
272 
273     /* Copy data into free space, then initialize lnk */
274     PetscCall(PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt));
275     PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));
276 
277     current_space->array += apnz;
278     current_space->local_used += apnz;
279     current_space->local_remaining -= apnz;
280   }
281 
282   /* Allocate space for apj, initialize apj, and */
283   /* destroy list of free space and other temporary array(s) */
284   PetscCall(PetscMalloc1(api[am] + 1, &ptap->apj));
285   apj = ptap->apj;
286   PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
287   PetscCall(PetscLLDestroy(lnk, lnkbt));
288 
289   /* malloc apa to store dense row A[i,:]*P */
290   PetscCall(PetscCalloc1(pN, &ptap->apa));
291 
292   /* set and assemble symbolic parallel matrix C */
293   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
294   PetscCall(MatSetBlockSizesFromMats(C, A, P));
295 
296   PetscCall(MatGetType(A, &mtype));
297   PetscCall(MatSetType(C, mtype));
298   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
299   MatPreallocateEnd(dnz, onz);
300 
301   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
302   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
303   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
304   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
305 
306   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
307   C->ops->productnumeric = MatProductNumeric_AB;
308 
309   /* attach the supporting struct to C for reuse */
310   C->product->data    = ptap;
311   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
312 
313   /* set MatInfo */
314   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
315   if (afill < 1.0) afill = 1.0;
316   C->info.mallocs           = nspacedouble;
317   C->info.fill_ratio_given  = fill;
318   C->info.fill_ratio_needed = afill;
319 
320 #if defined(PETSC_USE_INFO)
321   if (api[am]) {
322     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
323     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
324   } else {
325     PetscCall(PetscInfo(C, "Empty matrix product\n"));
326   }
327 #endif
328   PetscFunctionReturn(PETSC_SUCCESS);
329 }
330 
331 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
332 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);
333 
334 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
335 {
336   Mat_Product *product = C->product;
337   Mat          A = product->A, B = product->B;
338 
339   PetscFunctionBegin;
340   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
341     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
342 
343   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
344   C->ops->productsymbolic = MatProductSymbolic_AB;
345   PetscFunctionReturn(PETSC_SUCCESS);
346 }
347 
348 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
349 {
350   Mat_Product *product = C->product;
351   Mat          A = product->A, B = product->B;
352 
353   PetscFunctionBegin;
354   if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
355     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
356 
357   C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
358   C->ops->productsymbolic          = MatProductSymbolic_AtB;
359   PetscFunctionReturn(PETSC_SUCCESS);
360 }
361 
362 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
363 {
364   Mat_Product *product = C->product;
365 
366   PetscFunctionBegin;
367   switch (product->type) {
368   case MATPRODUCT_AB:
369     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
370     break;
371   case MATPRODUCT_AtB:
372     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
373     break;
374   default:
375     break;
376   }
377   PetscFunctionReturn(PETSC_SUCCESS);
378 }
379 
380 typedef struct {
381   Mat           workB, workB1;
382   MPI_Request  *rwaits, *swaits;
383   PetscInt      nsends, nrecvs;
384   MPI_Datatype *stype, *rtype;
385   PetscInt      blda;
386 } MPIAIJ_MPIDense;
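/* A brief summary of the fields above: workB (and workB1, used for the remaining columns when B is
   processed in column blocks) are SEQDENSE matrices holding the off-process rows of B needed by this
   rank; stype[i]/rtype[i] are MPI derived datatypes describing, respectively, the rows of B to send
   to the i-th neighbor and the slot they occupy in workB on arrival; blda records the leading
   dimension of B for which the send types were built, so reuse with a different lda is rejected. */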
387 
388 static PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
389 {
390   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense *)ctx;
391   PetscInt         i;
392 
393   PetscFunctionBegin;
394   PetscCall(MatDestroy(&contents->workB));
395   PetscCall(MatDestroy(&contents->workB1));
396   for (i = 0; i < contents->nsends; i++) PetscCallMPI(MPI_Type_free(&contents->stype[i]));
397   for (i = 0; i < contents->nrecvs; i++) PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
398   PetscCall(PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits));
399   PetscCall(PetscFree(contents));
400   PetscFunctionReturn(PETSC_SUCCESS);
401 }
402 
403 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
404 {
405   Mat_MPIAIJ      *aij = (Mat_MPIAIJ *)A->data;
406   PetscInt         nz  = aij->B->cmap->n, blda, m, M, n, N;
407   MPIAIJ_MPIDense *contents;
408   VecScatter       ctx = aij->Mvctx;
409   PetscInt         Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, numBb;
410   MPI_Comm         comm;
411   MPI_Datatype     type1, *stype, *rtype;
412   const PetscInt  *sindices, *sstarts, *rstarts;
413   PetscMPIInt     *disp, nsends, nrecvs, nrows_to, nrows_from;
414   PetscBool        cisdense;
415 
416   PetscFunctionBegin;
417   MatCheckProduct(C, 4);
418   PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
419   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
420   PetscCall(PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense));
421   if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)B)->type_name));
422   PetscCall(MatGetLocalSize(C, &m, &n));
423   PetscCall(MatGetSize(C, &M, &N));
424   if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) PetscCall(MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN));
425   PetscCall(MatSetBlockSizesFromMats(C, A, B));
426   PetscCall(MatSetUp(C));
427   PetscCall(MatDenseGetLDA(B, &blda));
428   PetscCall(PetscNew(&contents));
429 
430   PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
431   PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
432 
433   /* Create column block of B and C for memory scalability when BN is too large */
434   /* Estimate Bbn, column size of Bb */
435   if (nz) {
436     Bbn1 = 2 * Am * BN / nz;
437     if (!Bbn1) Bbn1 = 1;
438   } else Bbn1 = BN;
439 
440   bs   = B->cmap->bs;
441   Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
442   if (Bbn1 > BN) Bbn1 = BN;
443   PetscCallMPI(MPIU_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm));
444 
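  /* The estimate above is a heuristic: nz is the number of off-process columns of A, i.e. the number
     of rows of the work matrix, so Bbn1 = 2*Am*BN/nz keeps the (nz x Bbn) work matrix at roughly
     twice the storage of the local (Am x BN) block of C. Purely illustrative numbers: Am = 1000,
     BN = 50, nz = 4000 give Bbn1 = 25, i.e. B would be processed in two column blocks. */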
445   /* Enable runtime option for Bbn */
446   PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatMatMult", "Mat");
447   PetscCall(PetscOptionsInt("-matmatmult_Bbn", "Number of columns in Bb", "MatMatMult", Bbn, &Bbn, NULL));
448   PetscOptionsEnd();
449   Bbn = PetscMin(Bbn, BN);
450 
451   if (Bbn > 0 && Bbn < BN) {
452     numBb = BN / Bbn;
453     Bbn1  = BN - numBb * Bbn;
454   } else numBb = 0;
455 
456   if (numBb) {
457     PetscCall(PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb));
458     if (Bbn1) { /* Create workB1 for the remaining columns */
459       PetscCall(PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1));
460       /* Create work matrix used to store off processor rows of B needed for local product */
461       PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1));
462     } else contents->workB1 = NULL;
463   }
464 
465   /* Create work matrix used to store off processor rows of B needed for local product */
466   PetscCall(MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB));
467 
468   /* Use MPI derived data type to reduce memory required by the send/recv buffers */
469   PetscCall(PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits));
470   contents->stype  = stype;
471   contents->nsends = nsends;
472 
473   contents->rtype  = rtype;
474   contents->nrecvs = nrecvs;
475   contents->blda   = blda;
476 
477   PetscCall(PetscMalloc1(Bm + 1, &disp));
478   for (PetscMPIInt i = 0; i < nsends; i++) {
479     PetscCall(PetscMPIIntCast(sstarts[i + 1] - sstarts[i], &nrows_to));
480     for (PetscInt j = 0; j < nrows_to; j++) PetscCall(PetscMPIIntCast(sindices[sstarts[i] + j], &disp[j])); /* rowB to be sent */
481     PetscCallMPI(MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1));
482     PetscCallMPI(MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]));
483     PetscCallMPI(MPI_Type_commit(&stype[i]));
484     PetscCallMPI(MPI_Type_free(&type1));
485   }
486 
487   for (PetscMPIInt i = 0; i < nrecvs; i++) {
488     /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
489     PetscCall(PetscMPIIntCast(rstarts[i + 1] - rstarts[i], &nrows_from));
490     disp[0] = 0;
491     PetscCallMPI(MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1));
492     PetscCallMPI(MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]));
493     PetscCallMPI(MPI_Type_commit(&rtype[i]));
494     PetscCallMPI(MPI_Type_free(&type1));
495   }
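  /* What the two loops above build: B is stored column-major with leading dimension blda, so each
     stype[i] selects the nrows_to needed rows within one column (indexed block) and is resized to an
     extent of blda*sizeof(PetscScalar); sending ncols items of this type then sweeps across all
     columns of B with no packing buffer. rtype[i] is the matching receive type: one contiguous run of
     nrows_from values per column, resized to an extent of nz*sizeof(PetscScalar) so successive
     columns land in successive columns of workB. */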
496 
497   PetscCall(PetscFree(disp));
498   PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL));
499   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL));
500   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
501   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
502   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
503 
504   C->product->data       = contents;
505   C->product->destroy    = MatMPIAIJ_MPIDenseDestroy;
506   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
507   PetscFunctionReturn(PETSC_SUCCESS);
508 }
509 
510 PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);
511 
512 /*
513     Performs an efficient scatter on the rows of B needed by this process; this is
514     a modification of the VecScatterBegin_() routines.
515 
516     Input: if Bbidx = 0, the scatter fills contents->workB (Bbn columns); otherwise it fills contents->workB1 (the remaining Bbn1 columns), see MatMatMultSymbolic_MPIAIJ_MPIDense()
517 */
518 
519 static PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
520 {
521   Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
522   const PetscScalar *b;
523   PetscScalar       *rvalues;
524   VecScatter         ctx = aij->Mvctx;
525   const PetscInt    *sindices, *sstarts, *rstarts;
526   const PetscMPIInt *sprocs, *rprocs;
527   PetscMPIInt        nsends, nrecvs;
528   MPI_Request       *swaits, *rwaits;
529   MPI_Comm           comm;
530   PetscMPIInt        tag = ((PetscObject)ctx)->tag, ncols, nrows, nsends_mpi, nrecvs_mpi;
531   MPIAIJ_MPIDense   *contents;
532   Mat                workB;
533   MPI_Datatype      *stype, *rtype;
534   PetscInt           blda;
535 
536   PetscFunctionBegin;
537   MatCheckProduct(C, 4);
538   PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
539   PetscCall(PetscMPIIntCast(B->cmap->N, &ncols));
540   PetscCall(PetscMPIIntCast(aij->B->cmap->n, &nrows));
541   contents = (MPIAIJ_MPIDense *)C->product->data;
542   PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/));
543   PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/));
544   PetscCall(PetscMPIIntCast(nsends, &nsends_mpi));
545   PetscCall(PetscMPIIntCast(nrecvs, &nrecvs_mpi));
546   if (Bbidx == 0) workB = *outworkB = contents->workB;
547   else workB = *outworkB = contents->workB1;
548   PetscCheck(nrows == workB->rmap->n, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d", workB->rmap->n, nrows);
549   swaits = contents->swaits;
550   rwaits = contents->rwaits;
551 
552   PetscCall(MatDenseGetArrayRead(B, &b));
553   PetscCall(MatDenseGetLDA(B, &blda));
554   PetscCheck(blda == contents->blda, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT, blda, contents->blda);
555   PetscCall(MatDenseGetArray(workB, &rvalues));
556 
557   /* Post recv, use MPI derived data type to save memory */
558   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
559   rtype = contents->rtype;
560   for (PetscMPIInt i = 0; i < nrecvs; i++) PetscCallMPI(MPIU_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i));
561 
562   stype = contents->stype;
563   for (PetscMPIInt i = 0; i < nsends; i++) PetscCallMPI(MPIU_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i));
564 
565   if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE));
566   if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE));
567 
568   PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL));
569   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL));
570   PetscCall(MatDenseRestoreArrayRead(B, &b));
571   PetscCall(MatDenseRestoreArray(workB, &rvalues));
572   PetscFunctionReturn(PETSC_SUCCESS);
573 }
574 
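/*
   Numeric phase for C = A*B with B and C MPIDENSE: the local block of C gets aij->A * bdense->A
   (diagonal block of A times the locally owned rows of B), and the contribution of the off-process
   rows of B is added as aij->B * workB, with workB filled by MatMPIDenseScatter(). When the work
   matrix is narrower than BN (the memory-scalable column-block mode), the same two steps are applied
   to successive column blocks of B and C obtained with MatDenseGetSubMatrix().
*/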
575 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
576 {
577   Mat_MPIAIJ      *aij    = (Mat_MPIAIJ *)A->data;
578   Mat_MPIDense    *bdense = (Mat_MPIDense *)B->data;
579   Mat_MPIDense    *cdense = (Mat_MPIDense *)C->data;
580   Mat              workB;
581   MPIAIJ_MPIDense *contents;
582 
583   PetscFunctionBegin;
584   MatCheckProduct(C, 3);
585   PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
586   contents = (MPIAIJ_MPIDense *)C->product->data;
587   /* diagonal block of A times all local rows of B */
588   /* TODO: this calls a symbolic multiplication every time, which could be avoided */
589   PetscCall(MatMatMult(aij->A, bdense->A, MAT_REUSE_MATRIX, PETSC_CURRENT, &cdense->A));
590   if (contents->workB->cmap->n == B->cmap->N) {
591     /* get off processor parts of B needed to complete C=A*B */
592     PetscCall(MatMPIDenseScatter(A, B, 0, C, &workB));
593 
594     /* off-diagonal block of A times nonlocal rows of B */
595     PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
596   } else {
597     Mat       Bb, Cb;
598     PetscInt  BN = B->cmap->N, n = contents->workB->cmap->n;
599     PetscBool ccpu;
600 
601     PetscCheck(n > 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Column block size %" PetscInt_FMT " must be positive", n);
602     /* Prevent from unneeded copies back and forth from the GPU
603        when getting and restoring the submatrix
604        We need a proper GPU code for AIJ * dense in parallel */
605     PetscCall(MatBoundToCPU(C, &ccpu));
606     PetscCall(MatBindToCPU(C, PETSC_TRUE));
607     for (PetscInt i = 0; i < BN; i += n) {
608       PetscCall(MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb));
609       PetscCall(MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb));
610 
611       /* get off processor parts of B needed to complete C=A*B */
612       PetscCall(MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB));
613 
614       /* off-diagonal block of A times nonlocal rows of B */
615       cdense = (Mat_MPIDense *)Cb->data;
616       PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE));
617       PetscCall(MatDenseRestoreSubMatrix(B, &Bb));
618       PetscCall(MatDenseRestoreSubMatrix(C, &Cb));
619     }
620     PetscCall(MatBindToCPU(C, ccpu));
621   }
622   PetscFunctionReturn(PETSC_SUCCESS);
623 }
624 
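/*
   Numeric phase of the "scalable" algorithm: instead of a dense work row of length pN, apa_sparse
   holds only the nonzeros of the current row of AP, in the column order recorded in apj during the
   symbolic phase; each entry of A contributes through a sparse axpy that walks the matching row of P
   and the pattern of AP simultaneously.
*/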
625 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
626 {
627   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
628   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data;
629   Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)c->A->data, *co = (Mat_SeqAIJ *)c->B->data;
630   PetscInt          *adi = ad->i, *adj, *aoi = ao->i, *aoj;
631   PetscScalar       *ada, *aoa, *cda = cd->a, *coa = co->a;
632   Mat_SeqAIJ        *p_loc, *p_oth;
633   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
634   PetscScalar       *pa_loc, *pa_oth, *pa, valtmp, *ca;
635   PetscInt           cm = C->rmap->n, anz, pnz;
636   Mat_APMPI         *ptap;
637   PetscScalar       *apa_sparse;
638   const PetscScalar *dummy;
639   PetscInt          *api, *apj, *apJ, i, j, k, row;
640   PetscInt           cstart = C->cmap->rstart;
641   PetscInt           cdnz, conz, k0, k1, nextp;
642   MPI_Comm           comm;
643   PetscMPIInt        size;
644 
645   PetscFunctionBegin;
646   MatCheckProduct(C, 3);
647   ptap = (Mat_APMPI *)C->product->data;
648   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be computed. Missing data");
649   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
650   PetscCallMPI(MPI_Comm_size(comm, &size));
651   PetscCheck(ptap->P_oth || size <= 1, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "AP cannot be reused. Do not call MatProductClear()");
652 
653   /* flag CPU mask for C */
654 #if defined(PETSC_HAVE_DEVICE)
655   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
656   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
657   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
658 #endif
659   apa_sparse = ptap->apa;
660 
661   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
662   /* update numerical values of P_oth and P_loc */
663   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
664   PetscCall(MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc));
665 
666   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
667   /* get data from symbolic products */
668   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
669   pi_loc = p_loc->i;
670   pj_loc = p_loc->j;
671   pa_loc = p_loc->a;
672   if (size > 1) {
673     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
674     pi_oth = p_oth->i;
675     pj_oth = p_oth->j;
676     pa_oth = p_oth->a;
677   } else {
678     p_oth  = NULL;
679     pi_oth = NULL;
680     pj_oth = NULL;
681     pa_oth = NULL;
682   }
683 
684   /* trigger copy to CPU */
685   PetscCall(MatSeqAIJGetArrayRead(a->A, &dummy));
686   PetscCall(MatSeqAIJRestoreArrayRead(a->A, &dummy));
687   PetscCall(MatSeqAIJGetArrayRead(a->B, &dummy));
688   PetscCall(MatSeqAIJRestoreArrayRead(a->B, &dummy));
689   api = ptap->api;
690   apj = ptap->apj;
691   for (i = 0; i < cm; i++) {
692     apJ = apj + api[i];
693 
694     /* diagonal portion of A */
695     anz = adi[i + 1] - adi[i];
696     adj = ad->j + adi[i];
697     ada = ad->a + adi[i];
698     for (j = 0; j < anz; j++) {
699       row = adj[j];
700       pnz = pi_loc[row + 1] - pi_loc[row];
701       pj  = pj_loc + pi_loc[row];
702       pa  = pa_loc + pi_loc[row];
703       /* perform sparse axpy */
704       valtmp = ada[j];
705       nextp  = 0;
706       for (k = 0; nextp < pnz; k++) {
707         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
708           apa_sparse[k] += valtmp * pa[nextp++];
709         }
710       }
711       PetscCall(PetscLogFlops(2.0 * pnz));
712     }
713 
714     /* off-diagonal portion of A */
715     anz = aoi[i + 1] - aoi[i];
716     aoj = PetscSafePointerPlusOffset(ao->j, aoi[i]);
717     aoa = PetscSafePointerPlusOffset(ao->a, aoi[i]);
718     for (j = 0; j < anz; j++) {
719       row = aoj[j];
720       pnz = pi_oth[row + 1] - pi_oth[row];
721       pj  = pj_oth + pi_oth[row];
722       pa  = pa_oth + pi_oth[row];
723       /* perform sparse axpy */
724       valtmp = aoa[j];
725       nextp  = 0;
726       for (k = 0; nextp < pnz; k++) {
727         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
728           apa_sparse[k] += valtmp * pa[nextp++];
729         }
730       }
731       PetscCall(PetscLogFlops(2.0 * pnz));
732     }
733 
734     /* set values in C */
735     cdnz = cd->i[i + 1] - cd->i[i];
736     conz = co->i[i + 1] - co->i[i];
737 
738     /* 1st off-diagonal part of C */
739     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
740     k  = 0;
741     for (k0 = 0; k0 < conz; k0++) {
742       if (apJ[k] >= cstart) break;
743       ca[k0]        = apa_sparse[k];
744       apa_sparse[k] = 0.0;
745       k++;
746     }
747 
748     /* diagonal part of C */
749     ca = cda + cd->i[i];
750     for (k1 = 0; k1 < cdnz; k1++) {
751       ca[k1]        = apa_sparse[k];
752       apa_sparse[k] = 0.0;
753       k++;
754     }
755 
756     /* 2nd off-diagonal part of C */
757     ca = PetscSafePointerPlusOffset(coa, co->i[i]);
758     for (; k0 < conz; k0++) {
759       ca[k0]        = apa_sparse[k];
760       apa_sparse[k] = 0.0;
761       k++;
762     }
763   }
764   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
765   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
766   PetscFunctionReturn(PETSC_SUCCESS);
767 }
768 
769 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
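/* Compared with the nonscalable version, the condensed linked list here starts at lsize = pn + 20
   entries and is grown on demand with PetscLLCondensedExpand_Scalable() whenever an incoming row of P
   might not fit, and the work array apa used by the numeric phase is sized by the longest row of AP
   (apnz_max) rather than by the global column count pN. */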
770 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
771 {
772   MPI_Comm           comm;
773   PetscMPIInt        size;
774   Mat_APMPI         *ptap;
775   PetscFreeSpaceList free_space = NULL, current_space = NULL;
776   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
777   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc, *p_oth;
778   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
779   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
780   PetscInt           i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
781   PetscInt           am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
782   PetscReal          afill;
783   MatType            mtype;
784 
785   PetscFunctionBegin;
786   MatCheckProduct(C, 4);
787   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
788   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
789   PetscCallMPI(MPI_Comm_size(comm, &size));
790 
791   /* create struct Mat_APMPI and attach it to C later */
792   PetscCall(PetscNew(&ptap));
793 
794   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
795   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
796 
797   /* get P_loc by taking all local rows of P */
798   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
799 
800   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
801   pi_loc = p_loc->i;
802   pj_loc = p_loc->j;
803   if (size > 1) {
804     p_oth  = (Mat_SeqAIJ *)ptap->P_oth->data;
805     pi_oth = p_oth->i;
806     pj_oth = p_oth->j;
807   } else {
808     p_oth  = NULL;
809     pi_oth = NULL;
810     pj_oth = NULL;
811   }
812 
813   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
814   PetscCall(PetscMalloc1(am + 2, &api));
815   ptap->api = api;
816   api[0]    = 0;
817 
818   PetscCall(PetscLLCondensedCreate_Scalable(lsize, &lnk));
819 
820   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
821   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space));
822   current_space = free_space;
823   MatPreallocateBegin(comm, am, pn, dnz, onz);
824   for (i = 0; i < am; i++) {
825     /* diagonal portion of A */
826     nzi = adi[i + 1] - adi[i];
827     for (j = 0; j < nzi; j++) {
828       row  = *adj++;
829       pnz  = pi_loc[row + 1] - pi_loc[row];
830       Jptr = pj_loc + pi_loc[row];
831       /* Expand list if it is not long enough */
832       if (pnz + apnz_max > lsize) {
833         lsize = pnz + apnz_max;
834         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
835       }
836       /* add non-zero cols of P into the sorted linked list lnk */
837       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
838       apnz       = *lnk; /* The first element in the list is the number of items in the list */
839       api[i + 1] = api[i] + apnz;
840       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
841     }
842     /* off-diagonal portion of A */
843     nzi = aoi[i + 1] - aoi[i];
844     for (j = 0; j < nzi; j++) {
845       row  = *aoj++;
846       pnz  = pi_oth[row + 1] - pi_oth[row];
847       Jptr = pj_oth + pi_oth[row];
848       /* Expand list if it is not long enough */
849       if (pnz + apnz_max > lsize) {
850         lsize = pnz + apnz_max;
851         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
852       }
853       /* add non-zero cols of P into the sorted linked list lnk */
854       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk));
855       apnz       = *lnk; /* The first element in the list is the number of items in the list */
856       api[i + 1] = api[i] + apnz;
857       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
858     }
859 
860     /* add missing diagonal entry */
861     if (C->force_diagonals) {
862       j = i + rstart; /* column index */
863       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &j, lnk));
864     }
865 
866     apnz       = *lnk;
867     api[i + 1] = api[i] + apnz;
868     if (apnz > apnz_max) apnz_max = apnz;
869 
870     /* if free space is not available, double the total space in the list */
871     if (current_space->local_remaining < apnz) {
872       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space));
873       nspacedouble++;
874     }
875 
876     /* Copy data into free space, then initialize lnk */
877     PetscCall(PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk));
878     PetscCall(MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz));
879 
880     current_space->array += apnz;
881     current_space->local_used += apnz;
882     current_space->local_remaining -= apnz;
883   }
884 
885   /* Allocate space for apj, initialize apj, and */
886   /* destroy list of free space and other temporary array(s) */
887   PetscCall(PetscMalloc1(api[am] + 1, &ptap->apj));
888   apj = ptap->apj;
889   PetscCall(PetscFreeSpaceContiguous(&free_space, ptap->apj));
890   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
891 
892   /* create and assemble symbolic parallel matrix C */
893   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
894   PetscCall(MatSetBlockSizesFromMats(C, A, P));
895   PetscCall(MatGetType(A, &mtype));
896   PetscCall(MatSetType(C, mtype));
897   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
898   MatPreallocateEnd(dnz, onz);
899 
900   /* malloc apa for assembly C */
901   PetscCall(PetscCalloc1(apnz_max, &ptap->apa));
902 
903   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
904   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
905   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
906   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
907 
908   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
909   C->ops->productnumeric = MatProductNumeric_AB;
910 
911   /* attach the supporting struct to C for reuse */
912   C->product->data    = ptap;
913   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
914 
915   /* set MatInfo */
916   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
917   if (afill < 1.0) afill = 1.0;
918   C->info.mallocs           = nspacedouble;
919   C->info.fill_ratio_given  = fill;
920   C->info.fill_ratio_needed = afill;
921 
922 #if defined(PETSC_USE_INFO)
923   if (api[am]) {
924     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
925     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
926   } else {
927     PetscCall(PetscInfo(C, "Empty matrix product\n"));
928   }
929 #endif
930   PetscFunctionReturn(PETSC_SUCCESS);
931 }
932 
933 /* This function is needed for the seqMPI matrix-matrix multiplication.      */
934 /* Three sorted input arrays are merged into one sorted output array; the     */
935 /* merged length is returned in *size4. Duplicate entries only show up once.  */
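/* Purely illustrative example: in1 = {1,3}, in2 = {2,3}, in3 = {3,5} yields out = {1,2,3,5} and     */
/* *size4 = 4. The caller must supply out with room for size1+size2+size3 entries (the no-duplicate  */
/* worst case).                                                                                       */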
936 static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
937 {
938   PetscInt i = 0, j = 0, k = 0, l = 0;
939 
940   /* Traverse all three arrays */
941   while (i < size1 && j < size2 && k < size3) {
942     if (in1[i] < in2[j] && in1[i] < in3[k]) {
943       out[l++] = in1[i++];
944     } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
945       out[l++] = in2[j++];
946     } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
947       out[l++] = in3[k++];
948     } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
949       out[l++] = in1[i];
950       i++, j++;
951     } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
952       out[l++] = in1[i];
953       i++, k++;
954     } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
955       out[l++] = in2[j];
956       k++, j++;
957     } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
958       out[l++] = in1[i];
959       i++, j++, k++;
960     }
961   }
962 
963   /* Traverse two remaining arrays */
964   while (i < size1 && j < size2) {
965     if (in1[i] < in2[j]) {
966       out[l++] = in1[i++];
967     } else if (in1[i] > in2[j]) {
968       out[l++] = in2[j++];
969     } else {
970       out[l++] = in1[i];
971       i++, j++;
972     }
973   }
974 
975   while (i < size1 && k < size3) {
976     if (in1[i] < in3[k]) {
977       out[l++] = in1[i++];
978     } else if (in1[i] > in3[k]) {
979       out[l++] = in3[k++];
980     } else {
981       out[l++] = in1[i];
982       i++, k++;
983     }
984   }
985 
986   while (k < size3 && j < size2) {
987     if (in3[k] < in2[j]) {
988       out[l++] = in3[k++];
989     } else if (in3[k] > in2[j]) {
990       out[l++] = in2[j++];
991     } else {
992       out[l++] = in3[k];
993       k++, j++;
994     }
995   }
996 
997   /* Traverse one remaining array */
998   while (i < size1) out[l++] = in1[i++];
999   while (j < size2) out[l++] = in2[j++];
1000   while (k < size3) out[l++] = in3[k++];
1001 
1002   *size4 = l;
1003 }
1004 
1005 /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and  */
1006 /* adds up the products. Two of these three multiplications are performed with existing (sequential)      */
1007 /* matrix-matrix multiplications.  */
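/* The three sub-products are: (1) A_diag * P_diag, done with a sequential MatProduct on the local   */
/* diagonal blocks; (2) A_diag * P_loc_off, whose pattern is accumulated row by row with the         */
/* condensed linked list below; and (3) A_off * P_oth, done with MatMatMultSymbolic_SeqAIJ_SeqAIJ()  */
/* on the gathered off-process rows of P. Merge3SortedArrays() combines the three row patterns into  */
/* the pattern of A*P.                                                                                */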
1008 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1009 {
1010   MPI_Comm           comm;
1011   PetscMPIInt        size;
1012   Mat_APMPI         *ptap;
1013   PetscFreeSpaceList free_space_diag = NULL, current_space = NULL;
1014   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
1015   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)a->A->data, *ao = (Mat_SeqAIJ *)a->B->data, *p_loc;
1016   Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
1017   Mat_SeqAIJ        *adpd_seq, *p_off, *aopoth_seq;
1018   PetscInt           adponz, adpdnz;
1019   PetscInt          *pi_loc, *dnz, *onz;
1020   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
1021   PetscInt          *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1022   PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
1023   PetscBT            lnkbt;
1024   PetscReal          afill;
1025   PetscMPIInt        rank;
1026   Mat                adpd, aopoth;
1027   MatType            mtype;
1028   const char        *prefix;
1029 
1030   PetscFunctionBegin;
1031   MatCheckProduct(C, 4);
1032   PetscCheck(!C->product->data, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Extra product struct not empty");
1033   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1034   PetscCallMPI(MPI_Comm_size(comm, &size));
1035   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1036   PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));
1037 
1038   /* create struct Mat_APMPI and attach it to C later */
1039   PetscCall(PetscNew(&ptap));
1040 
1041   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1042   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth));
1043 
1044   /* get P_loc by taking all local rows of P */
1045   PetscCall(MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc));
1046 
1047   p_loc  = (Mat_SeqAIJ *)ptap->P_loc->data;
1048   pi_loc = p_loc->i;
1049 
1050   /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1051   PetscCall(PetscMalloc1(am + 2, &api));
1052   PetscCall(PetscMalloc1(am + 2, &adpoi));
1053 
1054   adpoi[0]  = 0;
1055   ptap->api = api;
1056   api[0]    = 0;
1057 
1058   /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
1059   PetscCall(PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt));
1060   MatPreallocateBegin(comm, am, pn, dnz, onz);
1061 
1062   /* Symbolic calc of A_loc_diag * P_loc_diag */
1063   PetscCall(MatGetOptionsPrefix(A, &prefix));
1064   PetscCall(MatProductCreate(a->A, p->A, NULL, &adpd));
1065   PetscCall(MatGetOptionsPrefix(A, &prefix));
1066   PetscCall(MatSetOptionsPrefix(adpd, prefix));
1067   PetscCall(MatAppendOptionsPrefix(adpd, "inner_diag_"));
1068 
1069   PetscCall(MatProductSetType(adpd, MATPRODUCT_AB));
1070   PetscCall(MatProductSetAlgorithm(adpd, "sorted"));
1071   PetscCall(MatProductSetFill(adpd, fill));
1072   PetscCall(MatProductSetFromOptions(adpd));
1073 
1074   adpd->force_diagonals = C->force_diagonals;
1075   PetscCall(MatProductSymbolic(adpd));
1076 
1077   adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
1078   adpdi    = adpd_seq->i;
1079   adpdj    = adpd_seq->j;
1080   p_off    = (Mat_SeqAIJ *)p->B->data;
1081   poff_i   = p_off->i;
1082   poff_j   = p_off->j;
1083 
1084   /* j_temp stores indices of a result row before they are added to the linked list */
1085   PetscCall(PetscMalloc1(pN + 2, &j_temp));
1086 
1087   /* Symbolic calc of the A_diag * p_loc_off */
1088   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1089   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag));
1090   current_space = free_space_diag;
1091 
1092   for (i = 0; i < am; i++) {
1093     /* A_diag * P_loc_off */
1094     nzi = adi[i + 1] - adi[i];
1095     for (j = 0; j < nzi; j++) {
1096       row  = *adj++;
1097       pnz  = poff_i[row + 1] - poff_i[row];
1098       Jptr = poff_j + poff_i[row];
1099       for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
1100       /* add non-zero cols of P into the sorted linked list lnk */
1101       PetscCall(PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt));
1102     }
1103 
1104     adponz       = lnk[0];
1105     adpoi[i + 1] = adpoi[i] + adponz;
1106 
1107     /* if free space is not available, double the total space in the list */
1108     if (current_space->local_remaining < adponz) {
1109       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space));
1110       nspacedouble++;
1111     }
1112 
1113     /* Copy data into free space, then initialize lnk */
1114     PetscCall(PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt));
1115 
1116     current_space->array += adponz;
1117     current_space->local_used += adponz;
1118     current_space->local_remaining -= adponz;
1119   }
1120 
1121   /* Symbolic calc of A_off * P_oth */
1122   PetscCall(MatSetOptionsPrefix(a->B, prefix));
1123   PetscCall(MatAppendOptionsPrefix(a->B, "inner_offdiag_"));
1124   PetscCall(MatCreate(PETSC_COMM_SELF, &aopoth));
1125   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
1126   aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
1127   aopothi    = aopoth_seq->i;
1128   aopothj    = aopoth_seq->j;
1129 
1130   /* Allocate space for apj, adpj, aopj, ... */
1131   /* destroy lists of free space and other temporary array(s) */
1132 
1133   PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am] + 2, &ptap->apj));
1134   PetscCall(PetscMalloc1(adpoi[am] + 2, &adpoj));
1135 
1136   /* Copy from linked list to j-array */
1137   PetscCall(PetscFreeSpaceContiguous(&free_space_diag, adpoj));
1138   PetscCall(PetscLLDestroy(lnk, lnkbt));
1139 
1140   adpoJ = adpoj;
1141   adpdJ = adpdj;
1142   aopJ  = aopothj;
1143   apj   = ptap->apj;
1144   apJ   = apj; /* still empty */
1145 
1146   /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1147   /* A_diag * P_loc_diag to get A*P */
1148   for (i = 0; i < am; i++) {
1149     aopnz  = aopothi[i + 1] - aopothi[i];
1150     adponz = adpoi[i + 1] - adpoi[i];
1151     adpdnz = adpdi[i + 1] - adpdi[i];
1152 
1153     /* Correct indices from A_diag*P_diag */
1154     for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
1155     /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1156     Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1157     PetscCall(MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz));
1158 
1159     aopJ += aopnz;
1160     adpoJ += adponz;
1161     adpdJ += adpdnz;
1162     apJ += apnz;
1163     api[i + 1] = api[i] + apnz;
1164   }
1165 
1166   /* malloc apa to store dense row A[i,:]*P */
1167   PetscCall(PetscCalloc1(pN + 2, &ptap->apa));
1168 
1169   /* create and assemble symbolic parallel matrix C */
1170   PetscCall(MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE));
1171   PetscCall(MatSetBlockSizesFromMats(C, A, P));
1172   PetscCall(MatGetType(A, &mtype));
1173   PetscCall(MatSetType(C, mtype));
1174   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1175   MatPreallocateEnd(dnz, onz);
1176 
1177   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
1178   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1179   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1180   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1181 
1182   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1183   C->ops->productnumeric = MatProductNumeric_AB;
1184 
1185   /* attach the supporting struct to C for reuse */
1186   C->product->data    = ptap;
1187   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1188 
1189   /* set MatInfo */
1190   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
1191   if (afill < 1.0) afill = 1.0;
1192   C->info.mallocs           = nspacedouble;
1193   C->info.fill_ratio_given  = fill;
1194   C->info.fill_ratio_needed = afill;
1195 
1196 #if defined(PETSC_USE_INFO)
1197   if (api[am]) {
1198     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
1199     PetscCall(PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
1200   } else {
1201     PetscCall(PetscInfo(C, "Empty matrix product\n"));
1202   }
1203 #endif
1204 
1205   PetscCall(MatDestroy(&aopoth));
1206   PetscCall(MatDestroy(&adpd));
1207   PetscCall(PetscFree(j_temp));
1208   PetscCall(PetscFree(adpoj));
1209   PetscCall(PetscFree(adpoi));
1210   PetscFunctionReturn(PETSC_SUCCESS);
1211 }
1212 
1213 /* This routine only works when scall=MAT_REUSE_MATRIX! */
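/* The explicit transpose Pt is created by the corresponding symbolic routine and kept in ptap->Pt;  */
/* here only its numerical values are refreshed (MatTransposeSetPrecursor + MatTranspose with        */
/* MAT_REUSE_MATRIX) before the plain A*B numeric kernel is reused, hence the restriction above.     */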
1214 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
1215 {
1216   Mat_APMPI *ptap;
1217   Mat        Pt;
1218 
1219   PetscFunctionBegin;
1220   MatCheckProduct(C, 3);
1221   ptap = (Mat_APMPI *)C->product->data;
1222   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1223   PetscCheck(ptap->Pt, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1224 
1225   Pt = ptap->Pt;
1226   PetscCall(MatTransposeSetPrecursor(P, Pt));
1227   PetscCall(MatTranspose(P, MAT_REUSE_MATRIX, &Pt));
1228   PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C));
1229   PetscFunctionReturn(PETSC_SUCCESS);
1230 }
1231 
1232 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
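/* Outline of the steps below: (0) form Rd = Pd^T and Ro = Po^T explicitly; (1) gather A_loc, all    */
/* local rows of A; (2) compute the sequential symbolic products C_oth = Ro*A_loc and C_loc =        */
/* Rd*A_loc; (3)-(4) the rows of C_oth belong to other ranks, so their i/j structure (coi/coj) is    */
/* communicated to the owning processes, letting each rank determine the pattern of its rows of      */
/* C = P^T*A.                                                                                         */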
1233 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
1234 {
1235   Mat_APMPI               *ptap;
1236   Mat_MPIAIJ              *p = (Mat_MPIAIJ *)P->data;
1237   MPI_Comm                 comm;
1238   PetscMPIInt              size, rank;
1239   PetscFreeSpaceList       free_space = NULL, current_space = NULL;
1240   PetscInt                 pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
1241   PetscInt                *lnk, i, k, rstart;
1242   PetscBT                  lnkbt;
1243   PetscMPIInt              tagi, tagj, *len_si, *len_s, *len_ri, nrecv, proc, nsend;
1244   PETSC_UNUSED PetscMPIInt icompleted = 0;
1245   PetscInt               **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
1246   PetscInt                 len, *dnz, *onz, *owners, nzi;
1247   PetscInt                 nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1248   MPI_Request             *swaits, *rwaits;
1249   MPI_Status              *sstatus, rstatus;
1250   PetscLayout              rowmap;
1251   PetscInt                *owners_co, *coi, *coj; /* i and j array of (p->B)^T*A*P - used in the communication */
1252   PetscMPIInt             *len_r, *id_r;          /* array of length of comm->size, store send/recv matrix values */
1253   PetscInt                *Jptr, *prmap = p->garray, con, j, Crmax;
1254   Mat_SeqAIJ              *a_loc, *c_loc, *c_oth;
1255   PetscHMapI               ta;
1256   MatType                  mtype;
1257   const char              *prefix;
1258 
1259   PetscFunctionBegin;
1260   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1261   PetscCallMPI(MPI_Comm_size(comm, &size));
1262   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1263 
1264   /* create symbolic parallel matrix C */
1265   PetscCall(MatGetType(A, &mtype));
1266   PetscCall(MatSetType(C, mtype));
1267 
1268   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1269 
1270   /* create struct Mat_APMPI and attach it to C later */
1271   PetscCall(PetscNew(&ptap));
1272 
1273   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1274   PetscCall(MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd));
1275   PetscCall(MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro));
1276 
1277   /* (1) compute symbolic A_loc */
1278   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc));
1279 
1280   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1281   PetscCall(MatGetOptionsPrefix(A, &prefix));
1282   PetscCall(MatSetOptionsPrefix(ptap->Ro, prefix));
1283   PetscCall(MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_"));
1284   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_oth));
1285   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth));
1286 
1287   /* (3) send coj of C_oth to other processors  */
1288   /* determine row ownership */
1289   PetscCall(PetscLayoutCreate(comm, &rowmap));
1290   rowmap->n  = pn;
1291   rowmap->bs = 1;
1292   PetscCall(PetscLayoutSetUp(rowmap));
1293   owners = rowmap->range;
1294 
1295   /* determine the number of messages to send, their lengths */
1296   PetscCall(PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 2, &owners_co));
1297   PetscCall(PetscArrayzero(len_s, size));
1298   PetscCall(PetscArrayzero(len_si, size));
1299 
1300   c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1301   coi   = c_oth->i;
1302   coj   = c_oth->j;
1303   con   = ptap->C_oth->rmap->n;
1304   proc  = 0;
1305   for (i = 0; i < con; i++) {
1306     while (prmap[i] >= owners[proc + 1]) proc++;
1307     len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
1308     len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1309   }
1310 
1311   len          = 0; /* max length of buf_si[], see (4) */
1312   owners_co[0] = 0;
1313   nsend        = 0;
1314   for (proc = 0; proc < size; proc++) {
1315     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1316     if (len_s[proc]) {
1317       nsend++;
1318       len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1319       len += len_si[proc];
1320     }
1321   }
1322 
1323   /* determine the number and length of messages to receive for coi and coj  */
1324   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv));
1325   PetscCall(PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri));
1326 
1327   /* post the Irecv and Isend of coj */
1328   PetscCall(PetscCommGetNewTag(comm, &tagj));
1329   PetscCall(PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits));
1330   PetscCall(PetscMalloc1(nsend + 1, &swaits));
1331   for (proc = 0, k = 0; proc < size; proc++) {
1332     if (!len_s[proc]) continue;
1333     i = owners_co[proc];
1334     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1335     k++;
1336   }
1337 
1338   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1339   PetscCall(MatSetOptionsPrefix(ptap->Rd, prefix));
1340   PetscCall(MatAppendOptionsPrefix(ptap->Rd, "inner_diag_"));
1341   PetscCall(MatCreate(PETSC_COMM_SELF, &ptap->C_loc));
1342   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc));
1343   c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;
1344 
1345   /* receives coj are complete */
1346   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1347   PetscCall(PetscFree(rwaits));
1348   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1349 
1350   /* add received column indices into ta to update Crmax */
1351   a_loc = (Mat_SeqAIJ *)ptap->A_loc->data;
1352 
1353   /* create and initialize a hash table */
1354   PetscCall(PetscHMapICreateWithSize(an, &ta)); /* for computing Crmax */
1355   MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);
1356 
1357   for (k = 0; k < nrecv; k++) { /* k-th received message */
1358     Jptr = buf_rj[k];
1359     for (j = 0; j < len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1360   }
1361   PetscCall(PetscHMapIGetSize(ta, &Crmax));
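  /* ta collected the distinct column indices of A_loc plus those received for C_oth, so Crmax
     is an upper bound on the number of nonzeros in any row of C (used to size workspace in (5)) */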
1362   PetscCall(PetscHMapIDestroy(&ta));
1363 
1364   /* (4) send and recv coi */
1365   PetscCall(PetscCommGetNewTag(comm, &tagi));
1366   PetscCall(PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits));
1367   PetscCall(PetscMalloc1(len + 1, &buf_s));
1368   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1369   for (proc = 0, k = 0; proc < size; proc++) {
1370     if (!len_s[proc]) continue;
1371     /* form outgoing message for i-structure:
1372          buf_si[0]:                 nrows to be sent
1373                [1:nrows]:           row index (local to the destination process)
1374                [nrows+1:2*nrows+1]: i-structure index
1375     */
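    /* illustrative example: sending nrows = 2 rows with 3 and 5 nonzeros gives
         buf_si = [2, r0, r1, 0, 3, 8]
       i.e. len_si[proc] = 2*(nrows+1) = 6 entries in total */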
1376     nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1377     buf_si_i    = buf_si + nrows + 1;
1378     buf_si[0]   = nrows;
1379     buf_si_i[0] = 0;
1380     nrows       = 0;
1381     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1382       nzi                 = coi[i + 1] - coi[i];
1383       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1384       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1385       nrows++;
1386     }
1387     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1388     k++;
1389     buf_si += len_si[proc];
1390   }
1391   for (i = 0; i < nrecv; i++) PetscCallMPI(MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus));
1392   PetscCall(PetscFree(rwaits));
1393   if (nsend) PetscCallMPI(MPI_Waitall(nsend, swaits, sstatus));
1394 
1395   PetscCall(PetscFree4(len_s, len_si, sstatus, owners_co));
1396   PetscCall(PetscFree(len_ri));
1397   PetscCall(PetscFree(swaits));
1398   PetscCall(PetscFree(buf_s));
1399 
1400   /* (5) compute the local portion of C      */
1401   /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */
1402   PetscCall(PetscFreeSpaceGet(Crmax, &free_space));
1403   current_space = free_space;
1404 
1405   PetscCall(PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci));
1406   for (k = 0; k < nrecv; k++) {
1407     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1408     nrows       = *buf_ri_k[k];
1409     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1410     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1411   }
1412 
1413   MatPreallocateBegin(comm, pn, an, dnz, onz);
1414   PetscCall(PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt));
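  /* merge, for each local row of C, the sorted column indices of C_loc with those received from
     other ranks (buf_rj), and record the resulting row length for preallocation */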
1415   for (i = 0; i < pn; i++) { /* for each local row of C */
1416     /* add C_loc into C */
1417     nzi  = c_loc->i[i + 1] - c_loc->i[i];
1418     Jptr = c_loc->j + c_loc->i[i];
1419     PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1420 
1421     /* add received col data into lnk */
1422     for (k = 0; k < nrecv; k++) { /* k-th received message */
1423       if (i == *nextrow[k]) {     /* i-th row */
1424         nzi  = *(nextci[k] + 1) - *nextci[k];
1425         Jptr = buf_rj[k] + *nextci[k];
1426         PetscCall(PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt));
1427         nextrow[k]++;
1428         nextci[k]++;
1429       }
1430     }
1431 
1432     /* add missing diagonal entry */
1433     if (C->force_diagonals) {
1434       k = i + owners[rank]; /* column index */
1435       PetscCall(PetscLLCondensedAddSorted(1, &k, lnk, lnkbt));
1436     }
1437 
1438     nzi = lnk[0];
1439 
1440     /* copy data into free space, then initialize lnk */
1441     PetscCall(PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt));
1442     PetscCall(MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz));
1443   }
1444   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1445   PetscCall(PetscLLDestroy(lnk, lnkbt));
1446   PetscCall(PetscFreeSpaceDestroy(free_space));
1447 
1448   /* local sizes and preallocation */
1449   PetscCall(MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE));
1450   PetscCall(PetscLayoutSetBlockSize(C->rmap, P->cmap->bs));
1451   PetscCall(PetscLayoutSetBlockSize(C->cmap, A->cmap->bs));
1452   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
1453   MatPreallocateEnd(dnz, onz);
1454 
1455   /* add C_loc and C_oth to C */
1456   PetscCall(MatGetOwnershipRange(C, &rstart, NULL));
1457   for (i = 0; i < pn; i++) {
1458     ncols = c_loc->i[i + 1] - c_loc->i[i];
1459     cols  = c_loc->j + c_loc->i[i];
1460     row   = rstart + i;
1461     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1462 
1463     if (C->force_diagonals) PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES));
1464   }
1465   for (i = 0; i < con; i++) {
1466     ncols = c_oth->i[i + 1] - c_oth->i[i];
1467     cols  = c_oth->j + c_oth->i[i];
1468     row   = prmap[i];
1469     PetscCall(MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES));
1470   }
1471   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1472   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1473   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1474 
1475   /* free communication buffers and the row layout */
1476   PetscCall(PetscFree(id_r));
1477   PetscCall(PetscFree(len_r));
1478   PetscCall(PetscFree(buf_ri[0]));
1479   PetscCall(PetscFree(buf_ri));
1480   PetscCall(PetscFree(buf_rj[0]));
1481   PetscCall(PetscFree(buf_rj));
1482   PetscCall(PetscLayoutDestroy(&rowmap));
1483 
1484   /* attach the supporting struct to C for reuse */
1485   C->product->data    = ptap;
1486   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1487   PetscFunctionReturn(PETSC_SUCCESS);
1488 }
1489 
1490 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1491 {
1492   Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
1493   Mat_SeqAIJ        *c_seq;
1494   Mat_APMPI         *ptap;
1495   Mat                A_loc, C_loc, C_oth;
1496   PetscInt           i, rstart, rend, cm, ncols, row;
1497   const PetscInt    *cols;
1498   const PetscScalar *vals;
1499 
1500   PetscFunctionBegin;
1501   MatCheckProduct(C, 3);
1502   ptap = (Mat_APMPI *)C->product->data;
1503   PetscCheck(ptap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtAP cannot be computed. Missing data");
1504   PetscCheck(ptap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1505   PetscCall(MatZeroEntries(C));
1506 
1507   /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1508   /* 1) get Rd = Pd^T, Ro = Po^T */
1509   PetscCall(MatTransposeSetPrecursor(p->A, ptap->Rd));
1510   PetscCall(MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd));
1511   PetscCall(MatTransposeSetPrecursor(p->B, ptap->Ro));
1512   PetscCall(MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro));
1513 
1514   /* 2) compute numeric A_loc */
1515   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc));
1516 
1517   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1518   A_loc = ptap->A_loc;
1519   PetscCall(ptap->C_loc->ops->matmultnumeric(ptap->Rd, A_loc, ptap->C_loc));
1520   PetscCall(ptap->C_oth->ops->matmultnumeric(ptap->Ro, A_loc, ptap->C_oth));
1521   C_loc = ptap->C_loc;
1522   C_oth = ptap->C_oth;
1523 
1524   /* add C_loc and C_oth to C */
1525   PetscCall(MatGetOwnershipRange(C, &rstart, &rend));
1526 
1527   /* C_loc -> C */
1528   cm    = C_loc->rmap->N;
1529   c_seq = (Mat_SeqAIJ *)C_loc->data;
1530   cols  = c_seq->j;
1531   vals  = c_seq->a;
1532   for (i = 0; i < cm; i++) {
1533     ncols = c_seq->i[i + 1] - c_seq->i[i];
1534     row   = rstart + i;
1535     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1536     cols += ncols;
1537     vals += ncols;
1538   }
1539 
1540   /* Co -> C, off-processor part */
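  /* rows of C_oth correspond to global rows p->garray[i] owned by other ranks; MatSetValues()
     with ADD_VALUES plus the assembly below routes these contributions to their owners */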
1541   cm    = C_oth->rmap->N;
1542   c_seq = (Mat_SeqAIJ *)C_oth->data;
1543   cols  = c_seq->j;
1544   vals  = c_seq->a;
1545   for (i = 0; i < cm; i++) {
1546     ncols = c_seq->i[i + 1] - c_seq->i[i];
1547     row   = p->garray[i];
1548     PetscCall(MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES));
1549     cols += ncols;
1550     vals += ncols;
1551   }
1552   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1553   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1554   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
1555   PetscFunctionReturn(PETSC_SUCCESS);
1556 }
1557 
1558 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1559 {
1560   Mat_Merge_SeqsToMPI *merge;
1561   Mat_MPIAIJ          *p  = (Mat_MPIAIJ *)P->data;
1562   Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)p->A->data, *po = (Mat_SeqAIJ *)p->B->data;
1563   Mat_APMPI           *ap;
1564   PetscInt            *adj;
1565   PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
1566   MatScalar           *ada, *ca, valtmp;
1567   PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1568   MPI_Comm             comm;
1569   PetscMPIInt          size, rank, taga, *len_s, proc;
1570   PetscInt            *owners, nrows, **buf_ri_k, **nextrow, **nextci;
1571   PetscInt           **buf_ri, **buf_rj;
1572   PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1573   MPI_Request         *s_waits, *r_waits;
1574   MPI_Status          *status;
1575   MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
1576   const PetscScalar   *dummy;
1577   PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
1578   Mat                  A_loc;
1579   Mat_SeqAIJ          *a_loc;
1580 
1581   PetscFunctionBegin;
1582   MatCheckProduct(C, 3);
1583   ap = (Mat_APMPI *)C->product->data;
1584   PetscCheck(ap, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be computed. Missing data");
1585   PetscCheck(ap->A_loc, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONGSTATE, "PtA cannot be reused. Do not call MatProductClear()");
1586   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
1587   PetscCallMPI(MPI_Comm_size(comm, &size));
1588   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1589 
1590   merge = ap->merge;
1591 
1592   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1593   /* get data from symbolic products */
1594   coi = merge->coi;
1595   coj = merge->coj;
1596   PetscCall(PetscCalloc1(coi[pon] + 1, &coa));
1597   bi     = merge->bi;
1598   bj     = merge->bj;
1599   owners = merge->rowmap->range;
1600   PetscCall(PetscCalloc1(bi[cm] + 1, &ba));
1601 
1602   /* get A_loc by taking all local rows of A */
1603   A_loc = ap->A_loc;
1604   PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
1605   a_loc = (Mat_SeqAIJ *)A_loc->data;
1606   ai    = a_loc->i;
1607   aj    = a_loc->j;
1608 
1609   /* trigger copy to CPU */
1610   PetscCall(MatSeqAIJGetArrayRead(p->A, &dummy));
1611   PetscCall(MatSeqAIJRestoreArrayRead(p->A, &dummy));
1612   PetscCall(MatSeqAIJGetArrayRead(p->B, &dummy));
1613   PetscCall(MatSeqAIJRestoreArrayRead(p->B, &dummy));
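  /* (the Get/Restore pairs above only force any device-resident values of p->A and p->B to be
     up to date on the host before the host-side loops below) */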
1614   for (i = 0; i < am; i++) {
1615     anz = ai[i + 1] - ai[i];
1616     adj = aj + ai[i];
1617     ada = a_loc->a + ai[i];
1618 
1619     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1620     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1621     pnz = po->i[i + 1] - po->i[i];
1622     poJ = po->j + po->i[i];
1623     pA  = po->a + po->i[i];
1624     for (j = 0; j < pnz; j++) {
1625       row = poJ[j];
1626       cj  = coj + coi[row];
1627       ca  = coa + coi[row];
1628       /* perform sparse axpy */
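      /* the symbolic row cj[] of Co is sorted and contains adj[] as a sub-sequence, so a single
         forward scan over cj matches every entry of this row of A */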
1629       nexta  = 0;
1630       valtmp = pA[j];
1631       for (k = 0; nexta < anz; k++) {
1632         if (cj[k] == adj[nexta]) {
1633           ca[k] += valtmp * ada[nexta];
1634           nexta++;
1635         }
1636       }
1637       PetscCall(PetscLogFlops(2.0 * anz));
1638     }
1639 
1640     /* put the value into Cd (diagonal part) */
1641     pnz = pd->i[i + 1] - pd->i[i];
1642     pdJ = pd->j + pd->i[i];
1643     pA  = pd->a + pd->i[i];
1644     for (j = 0; j < pnz; j++) {
1645       row = pdJ[j];
1646       cj  = bj + bi[row];
1647       ca  = ba + bi[row];
1648       /* perform sparse axpy */
1649       nexta  = 0;
1650       valtmp = pA[j];
1651       for (k = 0; nexta < anz; k++) {
1652         if (cj[k] == adj[nexta]) {
1653           ca[k] += valtmp * ada[nexta];
1654           nexta++;
1655         }
1656       }
1657       PetscCall(PetscLogFlops(2.0 * anz));
1658     }
1659   }
1660 
1661   /* 3) send and recv matrix values coa */
1662   buf_ri = merge->buf_ri;
1663   buf_rj = merge->buf_rj;
1664   len_s  = merge->len_s;
1665   PetscCall(PetscCommGetNewTag(comm, &taga));
1666   PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
1667 
1668   PetscCall(PetscMalloc2(merge->nsend + 1, &s_waits, size, &status));
1669   for (proc = 0, k = 0; proc < size; proc++) {
1670     if (!len_s[proc]) continue;
1671     i = merge->owners_co[proc];
1672     PetscCallMPI(MPIU_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
1673     k++;
1674   }
1675   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
1676   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
1677 
1678   PetscCall(PetscFree2(s_waits, status));
1679   PetscCall(PetscFree(r_waits));
1680   PetscCall(PetscFree(coa));
1681 
1682   /* 4) insert local Cseq and received values into Cmpi */
1683   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1684   for (k = 0; k < merge->nrecv; k++) {
1685     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1686     nrows       = *buf_ri_k[k];
1687     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1688     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1689   }
1690 
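  /* for each locally owned row of C: start from the locally computed values ba_i, fold in any
     matching entries received from other ranks, then insert the completed row into C */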
1691   for (i = 0; i < cm; i++) {
1692     row  = owners[rank] + i; /* global row index of C_seq */
1693     bj_i = bj + bi[i];       /* col indices of the i-th row of C */
1694     ba_i = ba + bi[i];
1695     bnz  = bi[i + 1] - bi[i];
1696     /* add received vals into ba */
1697     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1698       /* i-th row */
1699       if (i == *nextrow[k]) {
1700         cnz    = *(nextci[k] + 1) - *nextci[k];
1701         cj     = buf_rj[k] + *nextci[k];
1702         ca     = abuf_r[k] + *nextci[k];
1703         nextcj = 0;
1704         for (j = 0; nextcj < cnz; j++) {
1705           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1706             ba_i[j] += ca[nextcj++];
1707           }
1708         }
1709         nextrow[k]++;
1710         nextci[k]++;
1711         PetscCall(PetscLogFlops(2.0 * cnz));
1712       }
1713     }
1714     PetscCall(MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES));
1715   }
1716   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
1717   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
1718 
1719   PetscCall(PetscFree(ba));
1720   PetscCall(PetscFree(abuf_r[0]));
1721   PetscCall(PetscFree(abuf_r));
1722   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
1723   PetscFunctionReturn(PETSC_SUCCESS);
1724 }
1725 
1726 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1727 {
1728   Mat                  A_loc;
1729   Mat_APMPI           *ap;
1730   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
1731   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1732   PetscInt            *pdti, *pdtj, *poti, *potj, *ptJ;
1733   PetscInt             nnz;
1734   PetscInt            *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1735   PetscInt             am = A->rmap->n, pn = P->cmap->n;
1736   MPI_Comm             comm;
1737   PetscMPIInt          size, rank, tagi, tagj, *len_si, *len_s, *len_ri, proc;
1738   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
1739   PetscInt             len, *dnz, *onz, *owners;
1740   PetscInt             nzi, *bi, *bj;
1741   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1742   MPI_Request         *swaits, *rwaits;
1743   MPI_Status          *sstatus, rstatus;
1744   Mat_Merge_SeqsToMPI *merge;
1745   PetscInt            *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1746   PetscReal            afill  = 1.0, afill_tmp;
1747   PetscInt             rstart = P->cmap->rstart, rmax, Armax;
1748   Mat_SeqAIJ          *a_loc;
1749   PetscHMapI           ta;
1750   MatType              mtype;
1751 
1752   PetscFunctionBegin;
1753   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
1754   /* check if matrix local sizes are compatible */
1755   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart,
1756              A->rmap->rend, P->rmap->rstart, P->rmap->rend);
1757 
1758   PetscCallMPI(MPI_Comm_size(comm, &size));
1759   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1760 
1761   /* create struct Mat_APMPI and attached it to C later */
1762   PetscCall(PetscNew(&ap));
1763 
1764   /* get A_loc by taking all local rows of A */
1765   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));
1766 
1767   ap->A_loc = A_loc;
1768   a_loc     = (Mat_SeqAIJ *)A_loc->data;
1769   ai        = a_loc->i;
1770   aj        = a_loc->j;
1771 
1772   /* determine symbolic Co=(p->B)^T*A - send to others */
1773   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
1774   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
1775   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1776                          >= (num of nonzero rows of C_seq) - pn */
1777   PetscCall(PetscMalloc1(pon + 1, &coi));
1778   coi[0] = 0;
1779 
1780   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1781   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1782   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1783   current_space = free_space;
1784 
1785   /* create and initialize a linked list */
1786   PetscCall(PetscHMapICreateWithSize(A->cmap->n + a->B->cmap->N, &ta));
1787   MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1788   PetscCall(PetscHMapIGetSize(ta, &Armax));
1789 
1790   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1791 
1792   for (i = 0; i < pon; i++) {
1793     pnz = poti[i + 1] - poti[i];
1794     ptJ = potj + poti[i];
1795     for (j = 0; j < pnz; j++) {
1796       row  = ptJ[j]; /* row of A_loc == col of Pot */
1797       anz  = ai[row + 1] - ai[row];
1798       Jptr = aj + ai[row];
1799       /* add non-zero cols of A into the sorted linked list lnk */
1800       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1801     }
1802     nnz = lnk[0];
1803 
1804     /* If free space is not available, double the total space in the list */
1805     if (current_space->local_remaining < nnz) {
1806       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1807       nspacedouble++;
1808     }
1809 
1810     /* Copy data into free space, and zero out dense rows */
1811     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
1812 
1813     current_space->array += nnz;
1814     current_space->local_used += nnz;
1815     current_space->local_remaining -= nnz;
1816 
1817     coi[i + 1] = coi[i] + nnz;
1818   }
1819 
1820   PetscCall(PetscMalloc1(coi[pon] + 1, &coj));
1821   PetscCall(PetscFreeSpaceContiguous(&free_space, coj));
1822   PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */
1823 
1824   afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1825   if (afill_tmp > afill) afill = afill_tmp;
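  /* afill tracks the largest fill ratio nnz(result)/nnz(inputs) seen so far; it is reported via
     PetscInfo() at the end as a suggested 'fill' value for future symbolic calls */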
1826 
1827   /* send j-array (coj) of Co to other processors */
1828   /* determine row ownership */
1829   PetscCall(PetscNew(&merge));
1830   PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
1831 
1832   merge->rowmap->n  = pn;
1833   merge->rowmap->bs = 1;
1834 
1835   PetscCall(PetscLayoutSetUp(merge->rowmap));
1836   owners = merge->rowmap->range;
1837 
1838   /* determine the number of messages to send and their lengths */
1839   PetscCall(PetscCalloc1(size, &len_si));
1840   PetscCall(PetscCalloc1(size, &merge->len_s));
1841 
1842   len_s        = merge->len_s;
1843   merge->nsend = 0;
1844 
1845   PetscCall(PetscMalloc1(size + 2, &owners_co));
1846 
1847   proc = 0;
1848   for (i = 0; i < pon; i++) {
1849     while (prmap[i] >= owners[proc + 1]) proc++;
1850     len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1851     len_s[proc] += coi[i + 1] - coi[i];
1852   }
1853 
1854   len          = 0; /* max length of buf_si[] */
1855   owners_co[0] = 0;
1856   for (proc = 0; proc < size; proc++) {
1857     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1858     if (len_si[proc]) {
1859       merge->nsend++;
1860       len_si[proc] = 2 * (len_si[proc] + 1);
1861       len += len_si[proc];
1862     }
1863   }
1864 
1865   /* determine the number and length of messages to receive for coi and coj  */
1866   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
1867   PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
1868 
1869   /* post the Irecv and Isend of coj */
1870   PetscCall(PetscCommGetNewTag(comm, &tagj));
1871   PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits));
1872   PetscCall(PetscMalloc1(merge->nsend + 1, &swaits));
1873   for (proc = 0, k = 0; proc < size; proc++) {
1874     if (!len_s[proc]) continue;
1875     i = owners_co[proc];
1876     PetscCallMPI(MPIU_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k));
1877     k++;
1878   }
1879 
1880   /* receives and sends of coj are complete */
1881   PetscCall(PetscMalloc1(size, &sstatus));
1882   for (i = 0; i < merge->nrecv; i++) {
1883     PETSC_UNUSED PetscMPIInt icompleted;
1884     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1885   }
1886   PetscCall(PetscFree(rwaits));
1887   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1888 
1889   /* add received column indices into table to update Armax */
1890   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1891   for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1892     Jptr = buf_rj[k];
1893     for (j = 0; j < merge->len_r[k]; j++) PetscCall(PetscHMapISet(ta, *(Jptr + j) + 1, 1));
1894   }
1895   PetscCall(PetscHMapIGetSize(ta, &Armax));
1896 
1897   /* send and recv coi */
1898   PetscCall(PetscCommGetNewTag(comm, &tagi));
1899   PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits));
1900   PetscCall(PetscMalloc1(len + 1, &buf_s));
1901   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1902   for (proc = 0, k = 0; proc < size; proc++) {
1903     if (!len_s[proc]) continue;
1904     /* form outgoing message for i-structure:
1905          buf_si[0]:                 nrows to be sent
1906                [1:nrows]:           row index (local to the destination process)
1907                [nrows+1:2*nrows+1]: i-structure index
1908     */
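    /* (same buf_si layout as in the nonscalable symbolic routine above; see the example there) */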
1909     nrows       = len_si[proc] / 2 - 1;
1910     buf_si_i    = buf_si + nrows + 1;
1911     buf_si[0]   = nrows;
1912     buf_si_i[0] = 0;
1913     nrows       = 0;
1914     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1915       nzi                 = coi[i + 1] - coi[i];
1916       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1917       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1918       nrows++;
1919     }
1920     PetscCallMPI(MPIU_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k));
1921     k++;
1922     buf_si += len_si[proc];
1923   }
1924   i = merge->nrecv;
1925   while (i--) {
1926     PETSC_UNUSED PetscMPIInt icompleted;
1927     PetscCallMPI(MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus));
1928   }
1929   PetscCall(PetscFree(rwaits));
1930   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, swaits, sstatus));
1931   PetscCall(PetscFree(len_si));
1932   PetscCall(PetscFree(len_ri));
1933   PetscCall(PetscFree(swaits));
1934   PetscCall(PetscFree(sstatus));
1935   PetscCall(PetscFree(buf_s));
1936 
1937   /* compute the local portion of C (mpi mat) */
1938   /* allocate bi array and free space for accumulating nonzero column info */
1939   PetscCall(PetscMalloc1(pn + 1, &bi));
1940   bi[0] = 0;
1941 
1942   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1943   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1944   PetscCall(PetscFreeSpaceGet(nnz, &free_space));
1945   current_space = free_space;
1946 
1947   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci));
1948   for (k = 0; k < merge->nrecv; k++) {
1949     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1950     nrows       = *buf_ri_k[k];
1951     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1952     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1953   }
1954 
1955   PetscCall(PetscLLCondensedCreate_Scalable(Armax, &lnk));
1956   MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1957   rmax = 0;
1958   for (i = 0; i < pn; i++) {
1959     /* add pdt[i,:]*AP into lnk */
1960     pnz = pdti[i + 1] - pdti[i];
1961     ptJ = pdtj + pdti[i];
1962     for (j = 0; j < pnz; j++) {
1963       row  = ptJ[j]; /* row of A_loc == col of Pt */
1964       anz  = ai[row + 1] - ai[row];
1965       Jptr = aj + ai[row];
1966       /* add non-zero cols of A into the sorted linked list lnk */
1967       PetscCall(PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk));
1968     }
1969 
1970     /* add received col data into lnk */
1971     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1972       if (i == *nextrow[k]) {            /* i-th row */
1973         nzi  = *(nextci[k] + 1) - *nextci[k];
1974         Jptr = buf_rj[k] + *nextci[k];
1975         PetscCall(PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk));
1976         nextrow[k]++;
1977         nextci[k]++;
1978       }
1979     }
1980 
1981     /* add missing diagonal entry */
1982     if (C->force_diagonals) {
1983       k = i + owners[rank]; /* column index */
1984       PetscCall(PetscLLCondensedAddSorted_Scalable(1, &k, lnk));
1985     }
1986 
1987     nnz = lnk[0];
1988 
1989     /* if free space is not available, make more free space */
1990     if (current_space->local_remaining < nnz) {
1991       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space));
1992       nspacedouble++;
1993     }
1994     /* copy data into free space, then initialize lnk */
1995     PetscCall(PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk));
1996     PetscCall(MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz));
1997 
1998     current_space->array += nnz;
1999     current_space->local_used += nnz;
2000     current_space->local_remaining -= nnz;
2001 
2002     bi[i + 1] = bi[i] + nnz;
2003     if (nnz > rmax) rmax = nnz;
2004   }
2005   PetscCall(PetscFree3(buf_ri_k, nextrow, nextci));
2006 
2007   PetscCall(PetscMalloc1(bi[pn] + 1, &bj));
2008   PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
2009   afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2010   if (afill_tmp > afill) afill = afill_tmp;
2011   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2012   PetscCall(PetscHMapIDestroy(&ta));
2013   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj));
2014   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj));
2015 
2016   /* create symbolic parallel matrix C - why can this not be assembled in the Numeric part? */
2017   PetscCall(MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE));
2018   PetscCall(MatSetBlockSizes(C, P->cmap->bs, A->cmap->bs));
2019   PetscCall(MatGetType(A, &mtype));
2020   PetscCall(MatSetType(C, mtype));
2021   PetscCall(MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz));
2022   MatPreallocateEnd(dnz, onz);
2023   PetscCall(MatSetBlockSize(C, 1));
2024   PetscCall(MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
2025   for (i = 0; i < pn; i++) {
2026     row  = i + rstart;
2027     nnz  = bi[i + 1] - bi[i];
2028     Jptr = bj + bi[i];
2029     PetscCall(MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES));
2030   }
2031   PetscCall(MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY));
2032   PetscCall(MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY));
2033   PetscCall(MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
2034   merge->bi        = bi;
2035   merge->bj        = bj;
2036   merge->coi       = coi;
2037   merge->coj       = coj;
2038   merge->buf_ri    = buf_ri;
2039   merge->buf_rj    = buf_rj;
2040   merge->owners_co = owners_co;
2041 
2042   /* attach the supporting struct to C for reuse */
2043   C->product->data    = ap;
2044   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2045   ap->merge           = merge;
2046 
2047   C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2048 
2049 #if defined(PETSC_USE_INFO)
2050   if (bi[pn] != 0) {
2051     PetscCall(PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill));
2052     PetscCall(PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill));
2053   } else {
2054     PetscCall(PetscInfo(C, "Empty matrix product\n"));
2055   }
2056 #endif
2057   PetscFunctionReturn(PETSC_SUCCESS);
2058 }
2059 
2060 static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2061 {
2062   Mat_Product *product = C->product;
2063   Mat          A = product->A, B = product->B;
2064   PetscReal    fill = product->fill;
2065   PetscBool    flg;
2066 
2067   PetscFunctionBegin;
2068   /* scalable */
2069   PetscCall(PetscStrcmp(product->alg, "scalable", &flg));
2070   if (flg) {
2071     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C));
2072     goto next;
2073   }
2074 
2075   /* nonscalable */
2076   PetscCall(PetscStrcmp(product->alg, "nonscalable", &flg));
2077   if (flg) {
2078     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C));
2079     goto next;
2080   }
2081 
2082   /* matmatmult */
2083   PetscCall(PetscStrcmp(product->alg, "at*b", &flg));
2084   if (flg) {
2085     Mat        At;
2086     Mat_APMPI *ptap;
2087 
2088     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
2089     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C));
2090     ptap = (Mat_APMPI *)C->product->data;
2091     if (ptap) {
2092       ptap->Pt            = At;
2093       C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2094     }
2095     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2096     goto next;
2097   }
2098 
2099   /* backend general code */
2100   PetscCall(PetscStrcmp(product->alg, "backend", &flg));
2101   if (flg) {
2102     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2103     PetscFunctionReturn(PETSC_SUCCESS);
2104   }
2105 
2106   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatProduct type is not supported");
2107 
2108 next:
2109   C->ops->productnumeric = MatProductNumeric_AtB;
2110   PetscFunctionReturn(PETSC_SUCCESS);
2111 }
2112 
2113 /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
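/* Usage note (illustrative): the algorithm can be selected at run time with
     -matmatmult_via <scalable|nonscalable|seqmpi|backend>   (MatMatMult() interface)
     -mat_product_algorithm <...>                            (MatProduct API)
   plus "hypre" when PETSc is configured with hypre; see algTypes[] below. */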
2114 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2115 {
2116   Mat_Product *product = C->product;
2117   Mat          A = product->A, B = product->B;
2118 #if defined(PETSC_HAVE_HYPRE)
2119   const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2120   PetscInt    nalg        = 5;
2121 #else
2122   const char *algTypes[4] = {
2123     "scalable",
2124     "nonscalable",
2125     "seqmpi",
2126     "backend",
2127   };
2128   PetscInt nalg = 4;
2129 #endif
2130   PetscInt  alg = 1; /* set nonscalable algorithm as default */
2131   PetscBool flg;
2132   MPI_Comm  comm;
2133 
2134   PetscFunctionBegin;
2135   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2136 
2137   /* Set "nonscalable" as default algorithm */
2138   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2139   if (flg) {
2140     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2141 
2142     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2143     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2144       MatInfo   Ainfo, Binfo;
2145       PetscInt  nz_local;
2146       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2147 
2148       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2149       PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2150       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2151 
2152       if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2153       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));
2154 
2155       if (alg_scalable) {
2156         alg = 0; /* scalable algorithm would be ~50% slower than the nonscalable algorithm */
2157         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2158         PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2159       }
2160     }
2161   }
2162 
2163   /* Get runtime option */
2164   if (product->api_user) {
2165     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2166     PetscCall(PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2167     PetscOptionsEnd();
2168   } else {
2169     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2170     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2171     PetscOptionsEnd();
2172   }
2173   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2174 
2175   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2176   PetscFunctionReturn(PETSC_SUCCESS);
2177 }
2178 
2179 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2180 {
2181   PetscFunctionBegin;
2182   PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2183   C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2184   PetscFunctionReturn(PETSC_SUCCESS);
2185 }
2186 
2187 /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2188 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2189 {
2190   Mat_Product *product = C->product;
2191   Mat          A = product->A, B = product->B;
2192   const char  *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2193   PetscInt     nalg        = 4;
2194   PetscInt     alg         = 1; /* set default algorithm  */
2195   PetscBool    flg;
2196   MPI_Comm     comm;
2197 
2198   PetscFunctionBegin;
2199   /* Check matrix local sizes */
2200   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2201   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2202              A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);
2203 
2204   /* Set default algorithm */
2205   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2206   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2207 
2208   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2209   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2210     MatInfo   Ainfo, Binfo;
2211     PetscInt  nz_local;
2212     PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2213 
2214     PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2215     PetscCall(MatGetInfo(B, MAT_LOCAL, &Binfo));
2216     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2217 
2218     if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2219     PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));
2220 
2221     if (alg_scalable) {
2222       alg = 0; /* scalable algorithm would be ~50% slower than the nonscalable algorithm */
2223       PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2224       PetscCall(PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local)));
2225     }
2226   }
2227 
2228   /* Get runtime option */
2229   if (product->api_user) {
2230     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2231     PetscCall(PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2232     PetscOptionsEnd();
2233   } else {
2234     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2235     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2236     PetscOptionsEnd();
2237   }
2238   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2239 
2240   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2241   PetscFunctionReturn(PETSC_SUCCESS);
2242 }
2243 
2244 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2245 {
2246   Mat_Product *product = C->product;
2247   Mat          A = product->A, P = product->B;
2248   MPI_Comm     comm;
2249   PetscBool    flg;
2250   PetscInt     alg = 1; /* set default algorithm */
2251 #if !defined(PETSC_HAVE_HYPRE)
2252   const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2253   PetscInt    nalg        = 5;
2254 #else
2255   const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2256   PetscInt    nalg        = 6;
2257 #endif
2258   PetscInt pN = P->cmap->N;
2259 
2260   PetscFunctionBegin;
2261   /* Check matrix local sizes */
2262   PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
2263   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2264              A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2265   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",
2266              A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);
2267 
2268   /* Set "nonscalable" as default algorithm */
2269   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2270   if (flg) {
2271     PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2272 
2273     /* Set "scalable" as default if pN and local nonzeros of A and P are large */
2274     if (pN > 100000) {
2275       MatInfo   Ainfo, Pinfo;
2276       PetscInt  nz_local;
2277       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;
2278 
2279       PetscCall(MatGetInfo(A, MAT_LOCAL, &Ainfo));
2280       PetscCall(MatGetInfo(P, MAT_LOCAL, &Pinfo));
2281       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2282 
2283       if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2284       PetscCallMPI(MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm));
2285 
2286       if (alg_scalable) {
2287         alg = 0; /* scalable algorithm would be ~50% slower than the nonscalable algorithm */
2288         PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2289       }
2290     }
2291   }
2292 
2293   /* Get runtime option */
2294   if (product->api_user) {
2295     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2296     PetscCall(PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2297     PetscOptionsEnd();
2298   } else {
2299     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2300     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg));
2301     PetscOptionsEnd();
2302   }
2303   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2304 
2305   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2306   PetscFunctionReturn(PETSC_SUCCESS);
2307 }
2308 
2309 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2310 {
2311   Mat_Product *product = C->product;
2312   Mat          A = product->A, R = product->B;
2313 
2314   PetscFunctionBegin;
2315   /* Check matrix local sizes */
2316   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->n,
2317              A->rmap->n, R->rmap->n, R->cmap->n);
2318 
2319   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2320   PetscFunctionReturn(PETSC_SUCCESS);
2321 }
2322 
2323 /*
2324  Set options for ABC = A*B*C = A*(B*C); the ABC algorithm must be chosen from the AB algorithms
2325 */
2326 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2327 {
2328   Mat_Product *product     = C->product;
2329   PetscBool    flg         = PETSC_FALSE;
2330   PetscInt     alg         = 1; /* default algorithm */
2331   const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2332   PetscInt     nalg        = 3;
2333 
2334   PetscFunctionBegin;
2335   /* Set default algorithm */
2336   PetscCall(PetscStrcmp(C->product->alg, "default", &flg));
2337   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2338 
2339   /* Get runtime option */
2340   if (product->api_user) {
2341     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2342     PetscCall(PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg));
2343     PetscOptionsEnd();
2344   } else {
2345     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2346     PetscCall(PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg));
2347     PetscOptionsEnd();
2348   }
2349   if (flg) PetscCall(MatProductSetAlgorithm(C, algTypes[alg]));
2350 
2351   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2352   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2353   PetscFunctionReturn(PETSC_SUCCESS);
2354 }
2355 
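/*
   Usage sketch (illustrative only; standard MatProduct API, assuming assembled MPIAIJ matrices A and B):

     Mat C;
     PetscCall(MatProductCreate(A, B, NULL, &C));
     PetscCall(MatProductSetType(C, MATPRODUCT_AB));    // or MATPRODUCT_AtB, MATPRODUCT_PtAP, ...
     PetscCall(MatProductSetAlgorithm(C, "scalable"));  // optional; "default" triggers the heuristics above
     PetscCall(MatProductSetFromOptions(C));            // dispatches to the routine selected below
     PetscCall(MatProductSymbolic(C));
     PetscCall(MatProductNumeric(C));
     PetscCall(MatDestroy(&C));
*/
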
2356 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2357 {
2358   Mat_Product *product = C->product;
2359 
2360   PetscFunctionBegin;
2361   switch (product->type) {
2362   case MATPRODUCT_AB:
2363     PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2364     break;
2365   case MATPRODUCT_ABt:
2366     PetscCall(MatProductSetFromOptions_MPIAIJ_ABt(C));
2367     break;
2368   case MATPRODUCT_AtB:
2369     PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2370     break;
2371   case MATPRODUCT_PtAP:
2372     PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2373     break;
2374   case MATPRODUCT_RARt:
2375     PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2376     break;
2377   case MATPRODUCT_ABC:
2378     PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2379     break;
2380   default:
2381     break;
2382   }
2383   PetscFunctionReturn(PETSC_SUCCESS);
2384 }
2385