xref: /petsc/src/mat/impls/aij/mpi/mpiaij.c (revision 1777c8a54be4bdf32cfa56ef79c2513d44e9bbec)
1 #include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
2 #include <petsc/private/vecimpl.h>
3 #include <petsc/private/sfimpl.h>
4 #include <petsc/private/isimpl.h>
5 #include <petscblaslapack.h>
6 #include <petscsf.h>
7 #include <petsc/private/hashmapi.h>
8 
9 PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
10 {
11   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
12 
13   PetscFunctionBegin;
14   PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
15   PetscCall(MatStashDestroy_Private(&mat->stash));
16   PetscCall(VecDestroy(&aij->diag));
17   PetscCall(MatDestroy(&aij->A));
18   PetscCall(MatDestroy(&aij->B));
19 #if defined(PETSC_USE_CTABLE)
20   PetscCall(PetscHMapIDestroy(&aij->colmap));
21 #else
22   PetscCall(PetscFree(aij->colmap));
23 #endif
24   PetscCall(PetscFree(aij->garray));
25   PetscCall(VecDestroy(&aij->lvec));
26   PetscCall(VecScatterDestroy(&aij->Mvctx));
27   PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));
28   PetscCall(PetscFree(aij->ld));
29 
30   PetscCall(PetscFree(mat->data));
31 
32   /* may be created by MatCreateMPIAIJSumSeqAIJSymbolic */
33   PetscCall(PetscObjectCompose((PetscObject)mat, "MatMergeSeqsToMPI", NULL));
34 
35   PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
36   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
37   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
38   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
39   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocation_C", NULL));
40   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatResetPreallocation_C", NULL));
41   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetPreallocationCSR_C", NULL));
42   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
43   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpibaij_C", NULL));
44   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisbaij_C", NULL));
45 #if defined(PETSC_HAVE_CUDA)
46   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcusparse_C", NULL));
47 #endif
48 #if defined(PETSC_HAVE_HIP)
49   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijhipsparse_C", NULL));
50 #endif
51 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
52   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijkokkos_C", NULL));
53 #endif
54   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpidense_C", NULL));
55 #if defined(PETSC_HAVE_ELEMENTAL)
56   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_elemental_C", NULL));
57 #endif
58 #if defined(PETSC_HAVE_SCALAPACK)
59   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_scalapack_C", NULL));
60 #endif
61 #if defined(PETSC_HAVE_HYPRE)
62   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_hypre_C", NULL));
63   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", NULL));
64 #endif
65   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
66   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_is_mpiaij_C", NULL));
67   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatProductSetFromOptions_mpiaij_mpiaij_C", NULL));
68   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPIAIJSetUseScalableIncreaseOverlap_C", NULL));
69   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijperm_C", NULL));
70   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijsell_C", NULL));
71 #if defined(PETSC_HAVE_MKL_SPARSE)
72   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijmkl_C", NULL));
73 #endif
74   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpiaijcrl_C", NULL));
75   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_is_C", NULL));
76   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpiaij_mpisell_C", NULL));
77   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetPreallocationCOO_C", NULL));
78   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatSetValuesCOO_C", NULL));
79   PetscFunctionReturn(PETSC_SUCCESS);
80 }
81 
82 /* defines MatSetValues_MPI_Hash(), MatAssemblyBegin_MPI_Hash(), and  MatAssemblyEnd_MPI_Hash() */
83 #define TYPE AIJ
84 #define TYPE_AIJ
85 #include "../src/mat/impls/aij/mpi/mpihashmat.h"
86 #undef TYPE
87 #undef TYPE_AIJ
88 
89 static PetscErrorCode MatGetRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
90 {
91   Mat B;
92 
93   PetscFunctionBegin;
94   PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &B));
95   PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject)B));
96   PetscCall(MatGetRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
97   PetscCall(MatDestroy(&B));
98   PetscFunctionReturn(PETSC_SUCCESS);
99 }
100 
101 static PetscErrorCode MatRestoreRowIJ_MPIAIJ(Mat A, PetscInt oshift, PetscBool symmetric, PetscBool inodecompressed, PetscInt *m, const PetscInt *ia[], const PetscInt *ja[], PetscBool *done)
102 {
103   Mat B;
104 
105   PetscFunctionBegin;
106   PetscCall(PetscObjectQuery((PetscObject)A, "MatGetRowIJ_MPIAIJ", (PetscObject *)&B));
107   PetscCall(MatRestoreRowIJ(B, oshift, symmetric, inodecompressed, m, ia, ja, done));
108   PetscCall(PetscObjectCompose((PetscObject)A, "MatGetRowIJ_MPIAIJ", NULL));
109   PetscFunctionReturn(PETSC_SUCCESS);
110 }
111 
112 /*MC
113    MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.
114 
115    This matrix type is identical to `MATSEQAIJ` when constructed with a single process communicator,
116    and `MATMPIAIJ` otherwise.  As a result, for single process communicators,
117    `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
118    for communicators controlling multiple processes.  It is recommended that you call both of
119    the above preallocation routines for simplicity.
120 
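   Example Usage:
   A minimal sketch of the recommended construction; here `comm`, the global sizes `M` and `N`, and the
   per-row nonzero estimates 5 and 2 are placeholders to be replaced by application values:
.vb
   Mat A;
   PetscCall(MatCreate(comm, &A));
   PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N));
   PetscCall(MatSetType(A, MATAIJ));
   PetscCall(MatSeqAIJSetPreallocation(A, 5, NULL));          /* used when comm has a single process */
   PetscCall(MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL)); /* used when comm has multiple processes */
.ve
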
121    Options Database Key:
122 . -mat_type aij - sets the matrix type to `MATAIJ` during a call to `MatSetFromOptions()`
123 
124   Level: beginner
125 
126   Developer Note:
127   Subclasses include `MATAIJCUSPARSE`, `MATAIJPERM`, `MATAIJSELL`, `MATAIJMKL`, `MATAIJCRL`, and `MATAIJKOKKOS`; the type also
128   automatically switches over to use inodes when enough exist.
129 
130 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MATSEQAIJ`, `MatCreateAIJ()`, `MatCreateSeqAIJ()`
131 M*/
132 
133 /*MC
134    MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.
135 
136    This matrix type is identical to `MATSEQAIJCRL` when constructed with a single process communicator,
137    and `MATMPIAIJCRL` otherwise.  As a result, for single process communicators,
138    `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
139   for communicators controlling multiple processes.  It is recommended that you call both of
140   the above preallocation routines for simplicity.
141 
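   Example Usage:
   A minimal sketch of selecting this type at run time; the communicator `comm` and the sizes `M`, `N` are placeholders:
.vb
   Mat A;
   PetscCall(MatCreate(comm, &A));
   PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, M, N));
   PetscCall(MatSetFromOptions(A)); /* run with -mat_type aijcrl to select this type */
.ve
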
142    Options Database Key:
143 . -mat_type aijcrl - sets the matrix type to `MATAIJCRL` during a call to `MatSetFromOptions()`
144 
145   Level: beginner
146 
147 .seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJCRL()`, `MATSEQAIJCRL`, `MATMPIAIJCRL`
148 M*/
149 
150 static PetscErrorCode MatBindToCPU_MPIAIJ(Mat A, PetscBool flg)
151 {
152   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
153 
154   PetscFunctionBegin;
155 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP) || defined(PETSC_HAVE_VIENNACL)
156   A->boundtocpu = flg;
157 #endif
158   if (a->A) PetscCall(MatBindToCPU(a->A, flg));
159   if (a->B) PetscCall(MatBindToCPU(a->B, flg));
160 
161   /* In addition to binding the diagonal and off-diagonal matrices, bind the local vectors used for matrix-vector products.
162    * This may seem a little odd for a MatBindToCPU() call to do, but it makes no sense for the binding of these vectors
163    * to differ from the parent matrix. */
164   if (a->lvec) PetscCall(VecBindToCPU(a->lvec, flg));
165   if (a->diag) PetscCall(VecBindToCPU(a->diag, flg));
166 
167   PetscFunctionReturn(PETSC_SUCCESS);
168 }
169 
170 static PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
171 {
172   Mat_MPIAIJ *mat = (Mat_MPIAIJ *)M->data;
173 
174   PetscFunctionBegin;
175   if (mat->A) {
176     PetscCall(MatSetBlockSizes(mat->A, rbs, cbs));
177     PetscCall(MatSetBlockSizes(mat->B, rbs, 1));
178   }
179   PetscFunctionReturn(PETSC_SUCCESS);
180 }
181 
182 static PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M, IS *keptrows)
183 {
184   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *)M->data;
185   Mat_SeqAIJ      *a   = (Mat_SeqAIJ *)mat->A->data;
186   Mat_SeqAIJ      *b   = (Mat_SeqAIJ *)mat->B->data;
187   const PetscInt  *ia, *ib;
188   const MatScalar *aa, *bb, *aav, *bav;
189   PetscInt         na, nb, i, j, *rows, cnt = 0, n0rows;
190   PetscInt         m = M->rmap->n, rstart = M->rmap->rstart;
191 
192   PetscFunctionBegin;
193   *keptrows = NULL;
194 
195   ia = a->i;
196   ib = b->i;
197   PetscCall(MatSeqAIJGetArrayRead(mat->A, &aav));
198   PetscCall(MatSeqAIJGetArrayRead(mat->B, &bav));
199   for (i = 0; i < m; i++) {
200     na = ia[i + 1] - ia[i];
201     nb = ib[i + 1] - ib[i];
202     if (!na && !nb) {
203       cnt++;
204       goto ok1;
205     }
206     aa = aav + ia[i];
207     for (j = 0; j < na; j++) {
208       if (aa[j] != 0.0) goto ok1;
209     }
210     bb = bav ? bav + ib[i] : NULL;
211     for (j = 0; j < nb; j++) {
212       if (bb[j] != 0.0) goto ok1;
213     }
214     cnt++;
215   ok1:;
216   }
217   PetscCall(MPIU_Allreduce(&cnt, &n0rows, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)M)));
218   if (!n0rows) {
219     PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
220     PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
221     PetscFunctionReturn(PETSC_SUCCESS);
222   }
223   PetscCall(PetscMalloc1(M->rmap->n - cnt, &rows));
224   cnt = 0;
225   for (i = 0; i < m; i++) {
226     na = ia[i + 1] - ia[i];
227     nb = ib[i + 1] - ib[i];
228     if (!na && !nb) continue;
229     aa = aav + ia[i];
230     for (j = 0; j < na; j++) {
231       if (aa[j] != 0.0) {
232         rows[cnt++] = rstart + i;
233         goto ok2;
234       }
235     }
236     bb = bav ? bav + ib[i] : NULL;
237     for (j = 0; j < nb; j++) {
238       if (bb[j] != 0.0) {
239         rows[cnt++] = rstart + i;
240         goto ok2;
241       }
242     }
243   ok2:;
244   }
245   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), cnt, rows, PETSC_OWN_POINTER, keptrows));
246   PetscCall(MatSeqAIJRestoreArrayRead(mat->A, &aav));
247   PetscCall(MatSeqAIJRestoreArrayRead(mat->B, &bav));
248   PetscFunctionReturn(PETSC_SUCCESS);
249 }
250 
251 static PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y, Vec D, InsertMode is)
252 {
253   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)Y->data;
254   PetscBool   cong;
255 
256   PetscFunctionBegin;
257   PetscCall(MatHasCongruentLayouts(Y, &cong));
258   if (Y->assembled && cong) {
259     PetscCall(MatDiagonalSet(aij->A, D, is));
260   } else {
261     PetscCall(MatDiagonalSet_Default(Y, D, is));
262   }
263   PetscFunctionReturn(PETSC_SUCCESS);
264 }
265 
266 static PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M, IS *zrows)
267 {
268   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)M->data;
269   PetscInt    i, rstart, nrows, *rows;
270 
271   PetscFunctionBegin;
272   *zrows = NULL;
273   PetscCall(MatFindZeroDiagonals_SeqAIJ_Private(aij->A, &nrows, &rows));
274   PetscCall(MatGetOwnershipRange(M, &rstart, NULL));
275   for (i = 0; i < nrows; i++) rows[i] += rstart;
276   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)M), nrows, rows, PETSC_OWN_POINTER, zrows));
277   PetscFunctionReturn(PETSC_SUCCESS);
278 }
279 
280 static PetscErrorCode MatGetColumnReductions_MPIAIJ(Mat A, PetscInt type, PetscReal *reductions)
281 {
282   Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
283   PetscInt           i, m, n, *garray = aij->garray;
284   Mat_SeqAIJ        *a_aij = (Mat_SeqAIJ *)aij->A->data;
285   Mat_SeqAIJ        *b_aij = (Mat_SeqAIJ *)aij->B->data;
286   PetscReal         *work;
287   const PetscScalar *dummy;
288 
289   PetscFunctionBegin;
290   PetscCall(MatGetSize(A, &m, &n));
291   PetscCall(PetscCalloc1(n, &work));
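292   /* getting and immediately restoring the arrays below is presumably just to ensure the host copies of the
        values are up to date, since a_aij->a and b_aij->a are then read directly in the loops that follow */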
292   PetscCall(MatSeqAIJGetArrayRead(aij->A, &dummy));
293   PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &dummy));
294   PetscCall(MatSeqAIJGetArrayRead(aij->B, &dummy));
295   PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &dummy));
296   if (type == NORM_2) {
297     for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i] * a_aij->a[i]);
298     for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i] * b_aij->a[i]);
299   } else if (type == NORM_1) {
300     for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
301     for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
302   } else if (type == NORM_INFINITY) {
303     for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
304     for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]), work[garray[b_aij->j[i]]]);
305   } else if (type == REDUCTION_SUM_REALPART || type == REDUCTION_MEAN_REALPART) {
306     for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscRealPart(a_aij->a[i]);
307     for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscRealPart(b_aij->a[i]);
308   } else if (type == REDUCTION_SUM_IMAGINARYPART || type == REDUCTION_MEAN_IMAGINARYPART) {
309     for (i = 0; i < a_aij->i[aij->A->rmap->n]; i++) work[A->cmap->rstart + a_aij->j[i]] += PetscImaginaryPart(a_aij->a[i]);
310     for (i = 0; i < b_aij->i[aij->B->rmap->n]; i++) work[garray[b_aij->j[i]]] += PetscImaginaryPart(b_aij->a[i]);
311   } else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Unknown reduction type");
312   if (type == NORM_INFINITY) {
313     PetscCall(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)A)));
314   } else {
315     PetscCall(MPIU_Allreduce(work, reductions, n, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)A)));
316   }
317   PetscCall(PetscFree(work));
318   if (type == NORM_2) {
319     for (i = 0; i < n; i++) reductions[i] = PetscSqrtReal(reductions[i]);
320   } else if (type == REDUCTION_MEAN_REALPART || type == REDUCTION_MEAN_IMAGINARYPART) {
321     for (i = 0; i < n; i++) reductions[i] /= m;
322   }
323   PetscFunctionReturn(PETSC_SUCCESS);
324 }
325 
326 static PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A, IS *is)
327 {
328   Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
329   IS              sis, gis;
330   const PetscInt *isis, *igis;
331   PetscInt        n, *iis, nsis, ngis, rstart, i;
332 
333   PetscFunctionBegin;
334   PetscCall(MatFindOffBlockDiagonalEntries(a->A, &sis));
335   PetscCall(MatFindNonzeroRows(a->B, &gis));
336   PetscCall(ISGetSize(gis, &ngis));
337   PetscCall(ISGetSize(sis, &nsis));
338   PetscCall(ISGetIndices(sis, &isis));
339   PetscCall(ISGetIndices(gis, &igis));
340 
341   PetscCall(PetscMalloc1(ngis + nsis, &iis));
342   PetscCall(PetscArraycpy(iis, igis, ngis));
343   PetscCall(PetscArraycpy(iis + ngis, isis, nsis));
344   n = ngis + nsis;
345   PetscCall(PetscSortRemoveDupsInt(&n, iis));
346   PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
347   for (i = 0; i < n; i++) iis[i] += rstart;
348   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)A), n, iis, PETSC_OWN_POINTER, is));
349 
350   PetscCall(ISRestoreIndices(sis, &isis));
351   PetscCall(ISRestoreIndices(gis, &igis));
352   PetscCall(ISDestroy(&sis));
353   PetscCall(ISDestroy(&gis));
354   PetscFunctionReturn(PETSC_SUCCESS);
355 }
356 
357 /*
358   Local utility routine that creates a mapping from the global column
359 number to the local number in the off-diagonal part of the local
360 storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
361 a slightly higher hash table cost; without it, it is not scalable (each processor
362 holds an order-N integer array), but access is fast.
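As an illustration (the numbers here are made up): if garray = {3, 9, 14}, then global column 9 is local
off-diagonal column 1 and the value stored in the map is 1 + 1 = 2, so that a lookup result of 0 can mean
"this global column does not appear in the off-diagonal part B".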
363 */
364 PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
365 {
366   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
367   PetscInt    n   = aij->B->cmap->n, i;
368 
369   PetscFunctionBegin;
370   PetscCheck(!n || aij->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPIAIJ Matrix was assembled but is missing garray");
371 #if defined(PETSC_USE_CTABLE)
372   PetscCall(PetscHMapICreateWithSize(n, &aij->colmap));
373   for (i = 0; i < n; i++) PetscCall(PetscHMapISet(aij->colmap, aij->garray[i] + 1, i + 1));
374 #else
375   PetscCall(PetscCalloc1(mat->cmap->N + 1, &aij->colmap));
376   for (i = 0; i < n; i++) aij->colmap[aij->garray[i]] = i + 1;
377 #endif
378   PetscFunctionReturn(PETSC_SUCCESS);
379 }
380 
381 #define MatSetValues_SeqAIJ_A_Private(row, col, value, addv, orow, ocol) \
382   do { \
383     if (col <= lastcol1) low1 = 0; \
384     else high1 = nrow1; \
385     lastcol1 = col; \
386     while (high1 - low1 > 5) { \
387       t = (low1 + high1) / 2; \
388       if (rp1[t] > col) high1 = t; \
389       else low1 = t; \
390     } \
391     for (_i = low1; _i < high1; _i++) { \
392       if (rp1[_i] > col) break; \
393       if (rp1[_i] == col) { \
394         if (addv == ADD_VALUES) { \
395           ap1[_i] += value; \
396           /* Not sure whether PetscLogFlops() will slow down the code */ \
397           (void)PetscLogFlops(1.0); \
398         } else ap1[_i] = value; \
399         goto a_noinsert; \
400       } \
401     } \
402     if (value == 0.0 && ignorezeroentries && row != col) { \
403       low1  = 0; \
404       high1 = nrow1; \
405       goto a_noinsert; \
406     } \
407     if (nonew == 1) { \
408       low1  = 0; \
409       high1 = nrow1; \
410       goto a_noinsert; \
411     } \
412     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
413     MatSeqXAIJReallocateAIJ(A, am, 1, nrow1, row, col, rmax1, aa, ai, aj, rp1, ap1, aimax, nonew, MatScalar); \
414     N = nrow1++ - 1; \
415     a->nz++; \
416     high1++; \
417     /* shift up all the later entries in this row */ \
418     PetscCall(PetscArraymove(rp1 + _i + 1, rp1 + _i, N - _i + 1)); \
419     PetscCall(PetscArraymove(ap1 + _i + 1, ap1 + _i, N - _i + 1)); \
420     rp1[_i] = col; \
421     ap1[_i] = value; \
422     A->nonzerostate++; \
423   a_noinsert:; \
424     ailen[row] = nrow1; \
425   } while (0)
426 
427 #define MatSetValues_SeqAIJ_B_Private(row, col, value, addv, orow, ocol) \
428   do { \
429     if (col <= lastcol2) low2 = 0; \
430     else high2 = nrow2; \
431     lastcol2 = col; \
432     while (high2 - low2 > 5) { \
433       t = (low2 + high2) / 2; \
434       if (rp2[t] > col) high2 = t; \
435       else low2 = t; \
436     } \
437     for (_i = low2; _i < high2; _i++) { \
438       if (rp2[_i] > col) break; \
439       if (rp2[_i] == col) { \
440         if (addv == ADD_VALUES) { \
441           ap2[_i] += value; \
442           (void)PetscLogFlops(1.0); \
443         } else ap2[_i] = value; \
444         goto b_noinsert; \
445       } \
446     } \
447     if (value == 0.0 && ignorezeroentries) { \
448       low2  = 0; \
449       high2 = nrow2; \
450       goto b_noinsert; \
451     } \
452     if (nonew == 1) { \
453       low2  = 0; \
454       high2 = nrow2; \
455       goto b_noinsert; \
456     } \
457     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
458     MatSeqXAIJReallocateAIJ(B, bm, 1, nrow2, row, col, rmax2, ba, bi, bj, rp2, ap2, bimax, nonew, MatScalar); \
459     N = nrow2++ - 1; \
460     b->nz++; \
461     high2++; \
462     /* shift up all the later entries in this row */ \
463     PetscCall(PetscArraymove(rp2 + _i + 1, rp2 + _i, N - _i + 1)); \
464     PetscCall(PetscArraymove(ap2 + _i + 1, ap2 + _i, N - _i + 1)); \
465     rp2[_i] = col; \
466     ap2[_i] = value; \
467     B->nonzerostate++; \
468   b_noinsert:; \
469     bilen[row] = nrow2; \
470   } while (0)
471 
472 static PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A, PetscInt row, const PetscScalar v[])
473 {
474   Mat_MPIAIJ  *mat = (Mat_MPIAIJ *)A->data;
475   Mat_SeqAIJ  *a = (Mat_SeqAIJ *)mat->A->data, *b = (Mat_SeqAIJ *)mat->B->data;
476   PetscInt     l, *garray                         = mat->garray, diag;
477   PetscScalar *aa, *ba;
478 
479   PetscFunctionBegin;
480   /* code only works for square matrices A */
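482   /* Note: the copies below assume v holds the entire local row in ascending global column order: first the
        off-diagonal entries left of the diagonal block, then the diagonal-block entries, then the off-diagonal
        entries to the right; no column indices are passed in */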
481 
482   /* find size of row to the left of the diagonal part */
483   PetscCall(MatGetOwnershipRange(A, &diag, NULL));
484   row = row - diag;
485   for (l = 0; l < b->i[row + 1] - b->i[row]; l++) {
486     if (garray[b->j[b->i[row] + l]] > diag) break;
487   }
488   if (l) {
489     PetscCall(MatSeqAIJGetArray(mat->B, &ba));
490     PetscCall(PetscArraycpy(ba + b->i[row], v, l));
491     PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
492   }
493 
494   /* diagonal part */
495   if (a->i[row + 1] - a->i[row]) {
496     PetscCall(MatSeqAIJGetArray(mat->A, &aa));
497     PetscCall(PetscArraycpy(aa + a->i[row], v + l, (a->i[row + 1] - a->i[row])));
498     PetscCall(MatSeqAIJRestoreArray(mat->A, &aa));
499   }
500 
501   /* right of diagonal part */
502   if (b->i[row + 1] - b->i[row] - l) {
503     PetscCall(MatSeqAIJGetArray(mat->B, &ba));
504     PetscCall(PetscArraycpy(ba + b->i[row] + l, v + l + a->i[row + 1] - a->i[row], b->i[row + 1] - b->i[row] - l));
505     PetscCall(MatSeqAIJRestoreArray(mat->B, &ba));
506   }
507   PetscFunctionReturn(PETSC_SUCCESS);
508 }
509 
510 PetscErrorCode MatSetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
511 {
512   Mat_MPIAIJ *aij   = (Mat_MPIAIJ *)mat->data;
513   PetscScalar value = 0.0;
514   PetscInt    i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
515   PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
516   PetscBool   roworiented = aij->roworiented;
517 
518   /* Some Variables required in the macro */
519   Mat         A     = aij->A;
520   Mat_SeqAIJ *a     = (Mat_SeqAIJ *)A->data;
521   PetscInt   *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
522   PetscBool   ignorezeroentries = a->ignorezeroentries;
523   Mat         B                 = aij->B;
524   Mat_SeqAIJ *b                 = (Mat_SeqAIJ *)B->data;
525   PetscInt   *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
526   MatScalar  *aa, *ba;
527   PetscInt   *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
528   PetscInt    nonew;
529   MatScalar  *ap1, *ap2;
530 
531   PetscFunctionBegin;
532   PetscCall(MatSeqAIJGetArray(A, &aa));
533   PetscCall(MatSeqAIJGetArray(B, &ba));
534   for (i = 0; i < m; i++) {
535     if (im[i] < 0) continue;
536     PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
537     if (im[i] >= rstart && im[i] < rend) {
538       row      = im[i] - rstart;
539       lastcol1 = -1;
540       rp1      = aj ? aj + ai[row] : NULL;
541       ap1      = aa ? aa + ai[row] : NULL;
542       rmax1    = aimax[row];
543       nrow1    = ailen[row];
544       low1     = 0;
545       high1    = nrow1;
546       lastcol2 = -1;
547       rp2      = bj ? bj + bi[row] : NULL;
548       ap2      = ba ? ba + bi[row] : NULL;
549       rmax2    = bimax[row];
550       nrow2    = bilen[row];
551       low2     = 0;
552       high2    = nrow2;
553 
554       for (j = 0; j < n; j++) {
555         if (v) value = roworiented ? v[i * n + j] : v[i + j * m];
556         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
557         if (in[j] >= cstart && in[j] < cend) {
558           col   = in[j] - cstart;
559           nonew = a->nonew;
560           MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
561         } else if (in[j] < 0) {
562           continue;
563         } else {
564           PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
565           if (mat->was_assembled) {
566             if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
567 #if defined(PETSC_USE_CTABLE)
568             PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col)); /* map global col ids to local ones */
569             col--;
570 #else
571             col = aij->colmap[in[j]] - 1;
572 #endif
573             if (col < 0 && !((Mat_SeqAIJ *)(aij->B->data))->nonew) { /* col < 0 means in[j] is a new col for B */
574               PetscCall(MatDisAssemble_MPIAIJ(mat));                 /* Change aij->B from reduced/local format to expanded/global format */
575               col = in[j];
576               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
577               B     = aij->B;
578               b     = (Mat_SeqAIJ *)B->data;
579               bimax = b->imax;
580               bi    = b->i;
581               bilen = b->ilen;
582               bj    = b->j;
583               ba    = b->a;
584               rp2   = bj + bi[row];
585               ap2   = ba + bi[row];
586               rmax2 = bimax[row];
587               nrow2 = bilen[row];
588               low2  = 0;
589               high2 = nrow2;
590               bm    = aij->B->rmap->n;
591               ba    = b->a;
592             } else if (col < 0 && !(ignorezeroentries && value == 0.0)) {
593               if (1 == ((Mat_SeqAIJ *)(aij->B->data))->nonew) {
594                 PetscCall(PetscInfo(mat, "Skipping insertion of new nonzero location in off-diagonal portion of matrix %g(%" PetscInt_FMT ",%" PetscInt_FMT ")\n", (double)PetscRealPart(value), im[i], in[j]));
595               } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
596             }
597           } else col = in[j];
598           nonew = b->nonew;
599           MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
600         }
601       }
602     } else {
603       PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
604       if (!aij->donotstash) {
605         mat->assembled = PETSC_FALSE;
606         if (roworiented) {
607           PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v ? v + i * n : NULL, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
608         } else {
609           PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v ? v + i : NULL, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
610         }
611       }
612     }
613   }
614   PetscCall(MatSeqAIJRestoreArray(A, &aa)); /* aa, ba might have been freed due to reallocation above, but we don't access them here */
615   PetscCall(MatSeqAIJRestoreArray(B, &ba));
616   PetscFunctionReturn(PETSC_SUCCESS);
617 }
618 
619 /*
620     This function sets the j and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
621     The values in mat_i have to be sorted and the values in mat_j have to be sorted within each row (CSR-like).
622     No off-processor parts of the matrix are allowed here and mat->was_assembled has to be PETSC_FALSE.
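    As an illustration (purely made-up numbers): with cstart = 10 and cend = 20, a row with global columns
    {4, 11, 15, 27} is split so that aj receives the local columns {1, 5} (i.e. 11 - cstart and 15 - cstart),
    bj receives the global columns {4, 27}, and ailen/bilen for that row both become 2.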
623 */
624 PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[])
625 {
626   Mat_MPIAIJ *aij    = (Mat_MPIAIJ *)mat->data;
627   Mat         A      = aij->A; /* diagonal part of the matrix */
628   Mat         B      = aij->B; /* off-diagonal part of the matrix */
629   Mat_SeqAIJ *a      = (Mat_SeqAIJ *)A->data;
630   Mat_SeqAIJ *b      = (Mat_SeqAIJ *)B->data;
631   PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, col;
632   PetscInt   *ailen = a->ilen, *aj = a->j;
633   PetscInt   *bilen = b->ilen, *bj = b->j;
634   PetscInt    am          = aij->A->rmap->n, j;
635   PetscInt    diag_so_far = 0, dnz;
636   PetscInt    offd_so_far = 0, onz;
637 
638   PetscFunctionBegin;
639   /* Iterate over all rows of the matrix */
640   for (j = 0; j < am; j++) {
641     dnz = onz = 0;
642     /*  Iterate over all non-zero columns of the current row */
643     for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
644       /* If column is in the diagonal */
645       if (mat_j[col] >= cstart && mat_j[col] < cend) {
646         aj[diag_so_far++] = mat_j[col] - cstart;
647         dnz++;
648       } else { /* off-diagonal entries */
649         bj[offd_so_far++] = mat_j[col];
650         onz++;
651       }
652     }
653     ailen[j] = dnz;
654     bilen[j] = onz;
655   }
656   PetscFunctionReturn(PETSC_SUCCESS);
657 }
658 
659 /*
660     This function sets the local j, a and ilen arrays (of the diagonal and off-diagonal part) of an MPIAIJ-matrix.
661     The values in mat_i have to be sorted and the values in mat_j have to be sorted within each row (CSR-like).
662     No off-processor parts of the matrix are allowed here; they are set at a later point by MatSetValues_MPIAIJ.
663     Also, mat->was_assembled has to be false, otherwise the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart;
664     would not hold and the more complex MatSetValues_MPIAIJ has to be used.
665 */
666 PetscErrorCode MatSetValues_MPIAIJ_CopyFromCSRFormat(Mat mat, const PetscInt mat_j[], const PetscInt mat_i[], const PetscScalar mat_a[])
667 {
668   Mat_MPIAIJ  *aij  = (Mat_MPIAIJ *)mat->data;
669   Mat          A    = aij->A; /* diagonal part of the matrix */
670   Mat          B    = aij->B; /* off-diagonal part of the matrix */
671   Mat_SeqAIJ  *aijd = (Mat_SeqAIJ *)(aij->A)->data, *aijo = (Mat_SeqAIJ *)(aij->B)->data;
672   Mat_SeqAIJ  *a      = (Mat_SeqAIJ *)A->data;
673   Mat_SeqAIJ  *b      = (Mat_SeqAIJ *)B->data;
674   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend;
675   PetscInt    *ailen = a->ilen, *aj = a->j;
676   PetscInt    *bilen = b->ilen, *bj = b->j;
677   PetscInt     am          = aij->A->rmap->n, j;
678   PetscInt    *full_diag_i = aijd->i, *full_offd_i = aijo->i; /* These variables can also include non-local elements, which are set at a later point. */
679   PetscInt     col, dnz_row, onz_row, rowstart_diag, rowstart_offd;
680   PetscScalar *aa = a->a, *ba = b->a;
681 
682   PetscFunctionBegin;
683   /* Iterate over all rows of the matrix */
684   for (j = 0; j < am; j++) {
685     dnz_row = onz_row = 0;
686     rowstart_offd     = full_offd_i[j];
687     rowstart_diag     = full_diag_i[j];
688     /*  Iterate over all non-zero columns of the current row */
689     for (col = mat_i[j]; col < mat_i[j + 1]; col++) {
690       /* If column is in the diagonal */
691       if (mat_j[col] >= cstart && mat_j[col] < cend) {
692         aj[rowstart_diag + dnz_row] = mat_j[col] - cstart;
693         aa[rowstart_diag + dnz_row] = mat_a[col];
694         dnz_row++;
695       } else { /* off-diagonal entries */
696         bj[rowstart_offd + onz_row] = mat_j[col];
697         ba[rowstart_offd + onz_row] = mat_a[col];
698         onz_row++;
699       }
700     }
701     ailen[j] = dnz_row;
702     bilen[j] = onz_row;
703   }
704   PetscFunctionReturn(PETSC_SUCCESS);
705 }
706 
707 static PetscErrorCode MatGetValues_MPIAIJ(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
708 {
709   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
710   PetscInt    i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
711   PetscInt    cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
712 
713   PetscFunctionBegin;
714   for (i = 0; i < m; i++) {
715     if (idxm[i] < 0) continue; /* negative row */
716     PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
717     PetscCheck(idxm[i] >= rstart && idxm[i] < rend, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported, row requested %" PetscInt_FMT " range [%" PetscInt_FMT " %" PetscInt_FMT ")", idxm[i], rstart, rend);
718     row = idxm[i] - rstart;
719     for (j = 0; j < n; j++) {
720       if (idxn[j] < 0) continue; /* negative column */
721       PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
722       if (idxn[j] >= cstart && idxn[j] < cend) {
723         col = idxn[j] - cstart;
724         PetscCall(MatGetValues(aij->A, 1, &row, 1, &col, v + i * n + j));
725       } else {
726         if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
727 #if defined(PETSC_USE_CTABLE)
728         PetscCall(PetscHMapIGetWithDefault(aij->colmap, idxn[j] + 1, 0, &col));
729         col--;
730 #else
731         col = aij->colmap[idxn[j]] - 1;
732 #endif
733         if ((col < 0) || (aij->garray[col] != idxn[j])) *(v + i * n + j) = 0.0;
734         else PetscCall(MatGetValues(aij->B, 1, &row, 1, &col, v + i * n + j));
735       }
736     }
737   }
738   PetscFunctionReturn(PETSC_SUCCESS);
739 }
740 
741 static PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat, MatAssemblyType mode)
742 {
743   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
744   PetscInt    nstash, reallocs;
745 
746   PetscFunctionBegin;
747   if (aij->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);
748 
749   PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
750   PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
751   PetscCall(PetscInfo(aij->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
752   PetscFunctionReturn(PETSC_SUCCESS);
753 }
754 
755 PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat, MatAssemblyType mode)
756 {
757   Mat_MPIAIJ  *aij = (Mat_MPIAIJ *)mat->data;
758   PetscMPIInt  n;
759   PetscInt     i, j, rstart, ncols, flg;
760   PetscInt    *row, *col;
761   PetscBool    other_disassembled;
762   PetscScalar *val;
763 
764   /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */
765 
766   PetscFunctionBegin;
767   if (!aij->donotstash && !mat->nooffprocentries) {
768     while (1) {
769       PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
770       if (!flg) break;
771 
772       for (i = 0; i < n;) {
773         /* Now identify the consecutive vals belonging to the same row */
774         for (j = i, rstart = row[j]; j < n; j++) {
775           if (row[j] != rstart) break;
776         }
777         if (j < n) ncols = j - i;
778         else ncols = n - i;
779         /* Now assemble all these values with a single function call */
780         PetscCall(MatSetValues_MPIAIJ(mat, 1, row + i, ncols, col + i, val + i, mat->insertmode));
781         i = j;
782       }
783     }
784     PetscCall(MatStashScatterEnd_Private(&mat->stash));
785   }
786 #if defined(PETSC_HAVE_DEVICE)
787   if (mat->offloadmask == PETSC_OFFLOAD_CPU) aij->A->offloadmask = PETSC_OFFLOAD_CPU;
788   /* We call MatBindToCPU() on aij->A and aij->B here, because if MatBindToCPU_MPIAIJ() is called before assembly, it cannot bind these. */
789   if (mat->boundtocpu) {
790     PetscCall(MatBindToCPU(aij->A, PETSC_TRUE));
791     PetscCall(MatBindToCPU(aij->B, PETSC_TRUE));
792   }
793 #endif
794   PetscCall(MatAssemblyBegin(aij->A, mode));
795   PetscCall(MatAssemblyEnd(aij->A, mode));
796 
797   /* determine if any processor has disassembled; if so, we must
798      also disassemble ourselves so that we may reassemble. */
799   /*
800      if the nonzero structure of submatrix B cannot change then we know that
801      no processor disassembled, and thus we can skip this step
802   */
803   if (!((Mat_SeqAIJ *)aij->B->data)->nonew) {
804     PetscCall(MPIU_Allreduce(&mat->was_assembled, &other_disassembled, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
805     if (mat->was_assembled && !other_disassembled) { /* mat on this rank has reduced off-diag B with local col ids, but globally it does not */
806       PetscCall(MatDisAssemble_MPIAIJ(mat));
807     }
808   }
809   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPIAIJ(mat));
810   PetscCall(MatSetOption(aij->B, MAT_USE_INODES, PETSC_FALSE));
811 #if defined(PETSC_HAVE_DEVICE)
812   if (mat->offloadmask == PETSC_OFFLOAD_CPU && aij->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) aij->B->offloadmask = PETSC_OFFLOAD_CPU;
813 #endif
814   PetscCall(MatAssemblyBegin(aij->B, mode));
815   PetscCall(MatAssemblyEnd(aij->B, mode));
816 
817   PetscCall(PetscFree2(aij->rowvalues, aij->rowindices));
818 
819   aij->rowvalues = NULL;
820 
821   PetscCall(VecDestroy(&aij->diag));
822 
823   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
824   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ *)(aij->A->data))->nonew) {
825     PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
826     PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
827   }
828 #if defined(PETSC_HAVE_DEVICE)
829   mat->offloadmask = PETSC_OFFLOAD_BOTH;
830 #endif
831   PetscFunctionReturn(PETSC_SUCCESS);
832 }
833 
834 static PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
835 {
836   Mat_MPIAIJ *l = (Mat_MPIAIJ *)A->data;
837 
838   PetscFunctionBegin;
839   PetscCall(MatZeroEntries(l->A));
840   PetscCall(MatZeroEntries(l->B));
841   PetscFunctionReturn(PETSC_SUCCESS);
842 }
843 
844 static PetscErrorCode MatZeroRows_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
845 {
846   Mat_MPIAIJ      *mat = (Mat_MPIAIJ *)A->data;
847   PetscObjectState sA, sB;
848   PetscInt        *lrows;
849   PetscInt         r, len;
850   PetscBool        cong, lch, gch;
851 
852   PetscFunctionBegin;
853   /* get locally owned rows */
854   PetscCall(MatZeroRowsMapLocal_Private(A, N, rows, &len, &lrows));
855   PetscCall(MatHasCongruentLayouts(A, &cong));
856   /* fix right hand side if needed */
857   if (x && b) {
858     const PetscScalar *xx;
859     PetscScalar       *bb;
860 
861     PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
862     PetscCall(VecGetArrayRead(x, &xx));
863     PetscCall(VecGetArray(b, &bb));
864     for (r = 0; r < len; ++r) bb[lrows[r]] = diag * xx[lrows[r]];
865     PetscCall(VecRestoreArrayRead(x, &xx));
866     PetscCall(VecRestoreArray(b, &bb));
867   }
868 
869   sA = mat->A->nonzerostate;
870   sB = mat->B->nonzerostate;
871 
872   if (diag != 0.0 && cong) {
873     PetscCall(MatZeroRows(mat->A, len, lrows, diag, NULL, NULL));
874     PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
875   } else if (diag != 0.0) { /* non-square or non-congruent layouts -> if keepnonzeropattern is false, we allow for new insertion */
876     Mat_SeqAIJ *aijA = (Mat_SeqAIJ *)mat->A->data;
877     Mat_SeqAIJ *aijB = (Mat_SeqAIJ *)mat->B->data;
878     PetscInt    nnwA, nnwB;
879     PetscBool   nnzA, nnzB;
880 
881     nnwA = aijA->nonew;
882     nnwB = aijB->nonew;
883     nnzA = aijA->keepnonzeropattern;
884     nnzB = aijB->keepnonzeropattern;
885     if (!nnzA) {
886       PetscCall(PetscInfo(mat->A, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on diagonal block.\n"));
887       aijA->nonew = 0;
888     }
889     if (!nnzB) {
890       PetscCall(PetscInfo(mat->B, "Requested to not keep the pattern and add a nonzero diagonal; may encounter reallocations on off-diagonal block.\n"));
891       aijB->nonew = 0;
892     }
893     /* Must zero here before the next loop */
894     PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
895     PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
896     for (r = 0; r < len; ++r) {
897       const PetscInt row = lrows[r] + A->rmap->rstart;
898       if (row >= A->cmap->N) continue;
899       PetscCall(MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES));
900     }
901     aijA->nonew = nnwA;
902     aijB->nonew = nnwB;
903   } else {
904     PetscCall(MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL));
905     PetscCall(MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL));
906   }
907   PetscCall(PetscFree(lrows));
908   PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
909   PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
910 
911   /* reduce nonzerostate */
912   lch = (PetscBool)(sA != mat->A->nonzerostate || sB != mat->B->nonzerostate);
913   PetscCall(MPIU_Allreduce(&lch, &gch, 1, MPIU_BOOL, MPI_LOR, PetscObjectComm((PetscObject)A)));
914   if (gch) A->nonzerostate++;
915   PetscFunctionReturn(PETSC_SUCCESS);
916 }
917 
918 static PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A, PetscInt N, const PetscInt rows[], PetscScalar diag, Vec x, Vec b)
919 {
920   Mat_MPIAIJ        *l = (Mat_MPIAIJ *)A->data;
921   PetscMPIInt        n = A->rmap->n;
922   PetscInt           i, j, r, m, len = 0;
923   PetscInt          *lrows, *owners = A->rmap->range;
924   PetscMPIInt        p = 0;
925   PetscSFNode       *rrows;
926   PetscSF            sf;
927   const PetscScalar *xx;
928   PetscScalar       *bb, *mask, *aij_a;
929   Vec                xmask, lmask;
930   Mat_SeqAIJ        *aij = (Mat_SeqAIJ *)l->B->data;
931   const PetscInt    *aj, *ii, *ridx;
932   PetscScalar       *aa;
933 
934   PetscFunctionBegin;
935   /* Create SF where leaves are input rows and roots are owned rows */
936   PetscCall(PetscMalloc1(n, &lrows));
937   for (r = 0; r < n; ++r) lrows[r] = -1;
938   PetscCall(PetscMalloc1(N, &rrows));
939   for (r = 0; r < N; ++r) {
940     const PetscInt idx = rows[r];
941     PetscCheck(idx >= 0 && A->rmap->N > idx, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row %" PetscInt_FMT " out of range [0,%" PetscInt_FMT ")", idx, A->rmap->N);
942     if (idx < owners[p] || owners[p + 1] <= idx) { /* short-circuit the search if the last p owns this row too */
943       PetscCall(PetscLayoutFindOwner(A->rmap, idx, &p));
944     }
945     rrows[r].rank  = p;
946     rrows[r].index = rows[r] - owners[p];
947   }
948   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
949   PetscCall(PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER));
950   /* Collect flags for rows to be zeroed */
951   PetscCall(PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
952   PetscCall(PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *)rows, lrows, MPI_LOR));
953   PetscCall(PetscSFDestroy(&sf));
954   /* Compress and put in row numbers */
955   for (r = 0; r < n; ++r)
956     if (lrows[r] >= 0) lrows[len++] = r;
957   /* zero diagonal part of matrix */
958   PetscCall(MatZeroRowsColumns(l->A, len, lrows, diag, x, b));
959   /* handle off-diagonal part of matrix */
960   PetscCall(MatCreateVecs(A, &xmask, NULL));
961   PetscCall(VecDuplicate(l->lvec, &lmask));
962   PetscCall(VecGetArray(xmask, &bb));
963   for (i = 0; i < len; i++) bb[lrows[i]] = 1;
964   PetscCall(VecRestoreArray(xmask, &bb));
965   PetscCall(VecScatterBegin(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
966   PetscCall(VecScatterEnd(l->Mvctx, xmask, lmask, ADD_VALUES, SCATTER_FORWARD));
967   PetscCall(VecDestroy(&xmask));
968   if (x && b) { /* this code is buggy when the row and column layout don't match */
969     PetscBool cong;
970 
971     PetscCall(MatHasCongruentLayouts(A, &cong));
972     PetscCheck(cong, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Need matching row/col layout");
973     PetscCall(VecScatterBegin(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
974     PetscCall(VecScatterEnd(l->Mvctx, x, l->lvec, INSERT_VALUES, SCATTER_FORWARD));
975     PetscCall(VecGetArrayRead(l->lvec, &xx));
976     PetscCall(VecGetArray(b, &bb));
977   }
978   PetscCall(VecGetArray(lmask, &mask));
979   /* remove zeroed rows of off-diagonal matrix */
980   PetscCall(MatSeqAIJGetArray(l->B, &aij_a));
981   ii = aij->i;
982   for (i = 0; i < len; i++) PetscCall(PetscArrayzero(aij_a + ii[lrows[i]], ii[lrows[i] + 1] - ii[lrows[i]]));
983   /* loop over all elements of the off-process part of the matrix, zeroing removed columns */
984   if (aij->compressedrow.use) {
985     m    = aij->compressedrow.nrows;
986     ii   = aij->compressedrow.i;
987     ridx = aij->compressedrow.rindex;
988     for (i = 0; i < m; i++) {
989       n  = ii[i + 1] - ii[i];
990       aj = aij->j + ii[i];
991       aa = aij_a + ii[i];
992 
993       for (j = 0; j < n; j++) {
994         if (PetscAbsScalar(mask[*aj])) {
995           if (b) bb[*ridx] -= *aa * xx[*aj];
996           *aa = 0.0;
997         }
998         aa++;
999         aj++;
1000       }
1001       ridx++;
1002     }
1003   } else { /* do not use compressed row format */
1004     m = l->B->rmap->n;
1005     for (i = 0; i < m; i++) {
1006       n  = ii[i + 1] - ii[i];
1007       aj = aij->j + ii[i];
1008       aa = aij_a + ii[i];
1009       for (j = 0; j < n; j++) {
1010         if (PetscAbsScalar(mask[*aj])) {
1011           if (b) bb[i] -= *aa * xx[*aj];
1012           *aa = 0.0;
1013         }
1014         aa++;
1015         aj++;
1016       }
1017     }
1018   }
1019   if (x && b) {
1020     PetscCall(VecRestoreArray(b, &bb));
1021     PetscCall(VecRestoreArrayRead(l->lvec, &xx));
1022   }
1023   PetscCall(MatSeqAIJRestoreArray(l->B, &aij_a));
1024   PetscCall(VecRestoreArray(lmask, &mask));
1025   PetscCall(VecDestroy(&lmask));
1026   PetscCall(PetscFree(lrows));
1027 
1028   /* only change matrix nonzero state if pattern was allowed to be changed */
1029   if (!((Mat_SeqAIJ *)(l->A->data))->keepnonzeropattern) {
1030     PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
1031     PetscCall(MPIU_Allreduce(&state, &A->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)A)));
1032   }
1033   PetscFunctionReturn(PETSC_SUCCESS);
1034 }
1035 
1036 static PetscErrorCode MatMult_MPIAIJ(Mat A, Vec xx, Vec yy)
1037 {
1038   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1039   PetscInt    nt;
1040   VecScatter  Mvctx = a->Mvctx;
1041 
1042   PetscFunctionBegin;
1043   PetscCall(VecGetLocalSize(xx, &nt));
1044   PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
1045   PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1046   PetscUseTypeMethod(a->A, mult, xx, yy);
1047   PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1048   PetscUseTypeMethod(a->B, multadd, a->lvec, yy, yy);
1049   PetscFunctionReturn(PETSC_SUCCESS);
1050 }
1051 
1052 static PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A, Vec bb, Vec xx)
1053 {
1054   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1055 
1056   PetscFunctionBegin;
1057   PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
1058   PetscFunctionReturn(PETSC_SUCCESS);
1059 }
1060 
1061 static PetscErrorCode MatMultAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
1062 {
1063   Mat_MPIAIJ *a     = (Mat_MPIAIJ *)A->data;
1064   VecScatter  Mvctx = a->Mvctx;
1065 
1066   PetscFunctionBegin;
1067   PetscCall(VecScatterBegin(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1068   PetscCall((*a->A->ops->multadd)(a->A, xx, yy, zz));
1069   PetscCall(VecScatterEnd(Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
1070   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, zz, zz));
1071   PetscFunctionReturn(PETSC_SUCCESS);
1072 }
1073 
1074 static PetscErrorCode MatMultTranspose_MPIAIJ(Mat A, Vec xx, Vec yy)
1075 {
1076   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1077 
1078   PetscFunctionBegin;
1079   /* do nondiagonal part */
1080   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
1081   /* do local part */
1082   PetscCall((*a->A->ops->multtranspose)(a->A, xx, yy));
1083   /* add partial results together */
1084   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
1085   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
1086   PetscFunctionReturn(PETSC_SUCCESS);
1087 }
1088 
1089 static PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
1090 {
1091   MPI_Comm    comm;
1092   Mat_MPIAIJ *Aij = (Mat_MPIAIJ *)Amat->data, *Bij = (Mat_MPIAIJ *)Bmat->data;
1093   Mat         Adia = Aij->A, Bdia = Bij->A, Aoff, Boff, *Aoffs, *Boffs;
1094   IS          Me, Notme;
1095   PetscInt    M, N, first, last, *notme, i;
1096   PetscBool   lf;
1097   PetscMPIInt size;
1098 
1099   PetscFunctionBegin;
1100   /* Easy test: symmetric diagonal block */
1101   PetscCall(MatIsTranspose(Adia, Bdia, tol, &lf));
1102   PetscCall(MPIU_Allreduce(&lf, f, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)Amat)));
1103   if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
1104   PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
1105   PetscCallMPI(MPI_Comm_size(comm, &size));
1106   if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
1107 
1108   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
1109   PetscCall(MatGetSize(Amat, &M, &N));
1110   PetscCall(MatGetOwnershipRange(Amat, &first, &last));
1111   PetscCall(PetscMalloc1(N - last + first, &notme));
1112   for (i = 0; i < first; i++) notme[i] = i;
1113   for (i = last; i < M; i++) notme[i - last + first] = i;
1114   PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
1115   PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
1116   PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
1117   Aoff = Aoffs[0];
1118   PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
1119   Boff = Boffs[0];
1120   PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
1121   PetscCall(MatDestroyMatrices(1, &Aoffs));
1122   PetscCall(MatDestroyMatrices(1, &Boffs));
1123   PetscCall(ISDestroy(&Me));
1124   PetscCall(ISDestroy(&Notme));
1125   PetscCall(PetscFree(notme));
1126   PetscFunctionReturn(PETSC_SUCCESS);
1127 }
1128 
1129 static PetscErrorCode MatIsSymmetric_MPIAIJ(Mat A, PetscReal tol, PetscBool *f)
1130 {
1131   PetscFunctionBegin;
1132   PetscCall(MatIsTranspose_MPIAIJ(A, A, tol, f));
1133   PetscFunctionReturn(PETSC_SUCCESS);
1134 }
1135 
1136 static PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A, Vec xx, Vec yy, Vec zz)
1137 {
1138   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1139 
1140   PetscFunctionBegin;
1141   /* do nondiagonal part */
1142   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
1143   /* do local part */
1144   PetscCall((*a->A->ops->multtransposeadd)(a->A, xx, yy, zz));
1145   /* add partial results together */
1146   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
1147   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
1148   PetscFunctionReturn(PETSC_SUCCESS);
1149 }
1150 
1151 /*
1152   This only works correctly for square matrices where the subblock A->A is the
1153    diagonal block
1154 */
1155 static PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A, Vec v)
1156 {
1157   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1158 
1159   PetscFunctionBegin;
1160   PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
1161   PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
1162   PetscCall(MatGetDiagonal(a->A, v));
1163   PetscFunctionReturn(PETSC_SUCCESS);
1164 }
1165 
1166 static PetscErrorCode MatScale_MPIAIJ(Mat A, PetscScalar aa)
1167 {
1168   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1169 
1170   PetscFunctionBegin;
1171   PetscCall(MatScale(a->A, aa));
1172   PetscCall(MatScale(a->B, aa));
1173   PetscFunctionReturn(PETSC_SUCCESS);
1174 }
1175 
1176 static PetscErrorCode MatView_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
1177 {
1178   Mat_MPIAIJ        *aij    = (Mat_MPIAIJ *)mat->data;
1179   Mat_SeqAIJ        *A      = (Mat_SeqAIJ *)aij->A->data;
1180   Mat_SeqAIJ        *B      = (Mat_SeqAIJ *)aij->B->data;
1181   const PetscInt    *garray = aij->garray;
1182   const PetscScalar *aa, *ba;
1183   PetscInt           header[4], M, N, m, rs, cs, cnt, i, ja, jb;
1184   PetscInt64         nz, hnz;
1185   PetscInt          *rowlens;
1186   PetscInt          *colidxs;
1187   PetscScalar       *matvals;
1188   PetscMPIInt        rank;
1189 
1190   PetscFunctionBegin;
1191   PetscCall(PetscViewerSetUp(viewer));
1192 
1193   M  = mat->rmap->N;
1194   N  = mat->cmap->N;
1195   m  = mat->rmap->n;
1196   rs = mat->rmap->rstart;
1197   cs = mat->cmap->rstart;
1198   nz = A->nz + B->nz;
1199 
1200   /* write matrix header */
1201   header[0] = MAT_FILE_CLASSID;
1202   header[1] = M;
1203   header[2] = N;
1204   PetscCallMPI(MPI_Reduce(&nz, &hnz, 1, MPIU_INT64, MPI_SUM, 0, PetscObjectComm((PetscObject)mat)));
1205   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
1206   if (rank == 0) {
1207     if (hnz > PETSC_MAX_INT) header[3] = PETSC_MAX_INT;
1208     else header[3] = (PetscInt)hnz;
1209   }
1210   PetscCall(PetscViewerBinaryWrite(viewer, header, 4, PETSC_INT));
1211 
1212   /* fill in and store row lengths  */
1213   PetscCall(PetscMalloc1(m, &rowlens));
1214   for (i = 0; i < m; i++) rowlens[i] = A->i[i + 1] - A->i[i] + B->i[i + 1] - B->i[i];
1215   PetscCall(PetscViewerBinaryWriteAll(viewer, rowlens, m, rs, M, PETSC_INT));
1216   PetscCall(PetscFree(rowlens));
1217 
1218   /* fill in and store column indices */
1219   PetscCall(PetscMalloc1(nz, &colidxs));
1220   for (cnt = 0, i = 0; i < m; i++) {
1221     for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
1222       if (garray[B->j[jb]] > cs) break;
1223       colidxs[cnt++] = garray[B->j[jb]];
1224     }
1225     for (ja = A->i[i]; ja < A->i[i + 1]; ja++) colidxs[cnt++] = A->j[ja] + cs;
1226     for (; jb < B->i[i + 1]; jb++) colidxs[cnt++] = garray[B->j[jb]];
1227   }
1228   PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
1229   PetscCall(PetscViewerBinaryWriteAll(viewer, colidxs, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
1230   PetscCall(PetscFree(colidxs));
1231 
1232   /* fill in and store nonzero values */
1233   PetscCall(MatSeqAIJGetArrayRead(aij->A, &aa));
1234   PetscCall(MatSeqAIJGetArrayRead(aij->B, &ba));
1235   PetscCall(PetscMalloc1(nz, &matvals));
1236   for (cnt = 0, i = 0; i < m; i++) {
1237     for (jb = B->i[i]; jb < B->i[i + 1]; jb++) {
1238       if (garray[B->j[jb]] > cs) break;
1239       matvals[cnt++] = ba[jb];
1240     }
1241     for (ja = A->i[i]; ja < A->i[i + 1]; ja++) matvals[cnt++] = aa[ja];
1242     for (; jb < B->i[i + 1]; jb++) matvals[cnt++] = ba[jb];
1243   }
1244   PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &aa));
1245   PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &ba));
1246   PetscCheck(cnt == nz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Internal PETSc error: cnt = %" PetscInt_FMT " nz = %" PetscInt64_FMT, cnt, nz);
1247   PetscCall(PetscViewerBinaryWriteAll(viewer, matvals, nz, PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
1248   PetscCall(PetscFree(matvals));
1249 
1250   /* write block size option to the viewer's .info file */
1251   PetscCall(MatView_Binary_BlockSizes(mat, viewer));
1252   PetscFunctionReturn(PETSC_SUCCESS);
1253 }
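/*
   A hedged sketch, added for illustration and not part of the original source, of the on-disk layout produced
   by MatView_MPIAIJ_Binary() above and of how it is typically driven. The matrices A and B and the file name
   "matrix.dat" are assumptions for the example only:

     header[4]     = {MAT_FILE_CLASSID, M, N, global nonzero count (clamped to PETSC_MAX_INT)}
     rowlens[M]    = number of nonzeros in each global row
     colidxs[nnz]  = global column indices, sorted within each row
     matvals[nnz]  = the corresponding values

     PetscViewer viewer;
     PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_WRITE, &viewer));
     PetscCall(MatView(A, viewer));                 // ends up in MatView_MPIAIJ_Binary() for a parallel MATMPIAIJ
     PetscCall(PetscViewerDestroy(&viewer));

     PetscCall(MatCreate(PETSC_COMM_WORLD, &B));
     PetscCall(MatSetType(B, MATMPIAIJ));
     PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_READ, &viewer));
     PetscCall(MatLoad(B, viewer));                 // reads back the layout written above
     PetscCall(PetscViewerDestroy(&viewer));
*/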
1254 
1255 #include <petscdraw.h>
1256 static PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
1257 {
1258   Mat_MPIAIJ       *aij  = (Mat_MPIAIJ *)mat->data;
1259   PetscMPIInt       rank = aij->rank, size = aij->size;
1260   PetscBool         isdraw, iascii, isbinary;
1261   PetscViewer       sviewer;
1262   PetscViewerFormat format;
1263 
1264   PetscFunctionBegin;
1265   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
1266   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
1267   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
1268   if (iascii) {
1269     PetscCall(PetscViewerGetFormat(viewer, &format));
1270     if (format == PETSC_VIEWER_LOAD_BALANCE) {
1271       PetscInt i, nmax = 0, nmin = PETSC_MAX_INT, navg = 0, *nz, nzlocal = ((Mat_SeqAIJ *)(aij->A->data))->nz + ((Mat_SeqAIJ *)(aij->B->data))->nz;
1272       PetscCall(PetscMalloc1(size, &nz));
1273       PetscCallMPI(MPI_Allgather(&nzlocal, 1, MPIU_INT, nz, 1, MPIU_INT, PetscObjectComm((PetscObject)mat)));
1274       for (i = 0; i < (PetscInt)size; i++) {
1275         nmax = PetscMax(nmax, nz[i]);
1276         nmin = PetscMin(nmin, nz[i]);
1277         navg += nz[i];
1278       }
1279       PetscCall(PetscFree(nz));
1280       navg = navg / size;
1281       PetscCall(PetscViewerASCIIPrintf(viewer, "Load Balance - Nonzeros: Min %" PetscInt_FMT "  avg %" PetscInt_FMT "  max %" PetscInt_FMT "\n", nmin, navg, nmax));
1282       PetscFunctionReturn(PETSC_SUCCESS);
1283     }
1284     PetscCall(PetscViewerGetFormat(viewer, &format));
1285     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1286       MatInfo   info;
1287       PetscInt *inodes = NULL;
1288 
1289       PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
1290       PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
1291       PetscCall(MatInodeGetInodeSizes(aij->A, NULL, &inodes, NULL));
1292       PetscCall(PetscViewerASCIIPushSynchronized(viewer));
1293       if (!inodes) {
1294         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated,
1295                                                      (double)info.memory));
1296       } else {
1297         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %g, using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used, (PetscInt)info.nz_allocated,
1298                                                      (double)info.memory));
1299       }
1300       PetscCall(MatGetInfo(aij->A, MAT_LOCAL, &info));
1301       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
1302       PetscCall(MatGetInfo(aij->B, MAT_LOCAL, &info));
1303       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
1304       PetscCall(PetscViewerFlush(viewer));
1305       PetscCall(PetscViewerASCIIPopSynchronized(viewer));
1306       PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
1307       PetscCall(VecScatterView(aij->Mvctx, viewer));
1308       PetscFunctionReturn(PETSC_SUCCESS);
1309     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1310       PetscInt inodecount, inodelimit, *inodes;
1311       PetscCall(MatInodeGetInodeSizes(aij->A, &inodecount, &inodes, &inodelimit));
1312       if (inodes) {
1313         PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
1314       } else {
1315         PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
1316       }
1317       PetscFunctionReturn(PETSC_SUCCESS);
1318     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1319       PetscFunctionReturn(PETSC_SUCCESS);
1320     }
1321   } else if (isbinary) {
1322     if (size == 1) {
1323       PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
1324       PetscCall(MatView(aij->A, viewer));
1325     } else {
1326       PetscCall(MatView_MPIAIJ_Binary(mat, viewer));
1327     }
1328     PetscFunctionReturn(PETSC_SUCCESS);
1329   } else if (iascii && size == 1) {
1330     PetscCall(PetscObjectSetName((PetscObject)aij->A, ((PetscObject)mat)->name));
1331     PetscCall(MatView(aij->A, viewer));
1332     PetscFunctionReturn(PETSC_SUCCESS);
1333   } else if (isdraw) {
1334     PetscDraw draw;
1335     PetscBool isnull;
1336     PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
1337     PetscCall(PetscDrawIsNull(draw, &isnull));
1338     if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
1339   }
1340 
1341   { /* assemble the entire matrix onto first processor */
1342     Mat A = NULL, Av;
1343     IS  isrow, iscol;
1344 
1345     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->rmap->N : 0, 0, 1, &isrow));
1346     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat), rank == 0 ? mat->cmap->N : 0, 0, 1, &iscol));
1347     PetscCall(MatCreateSubMatrix(mat, isrow, iscol, MAT_INITIAL_MATRIX, &A));
1348     PetscCall(MatMPIAIJGetSeqAIJ(A, &Av, NULL, NULL));
1349     /*  The commented code uses MatCreateSubMatrices instead */
1350     /*
1351     Mat *AA, A = NULL, Av;
1352     IS  isrow,iscol;
1353 
1354     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->rmap->N : 0,0,1,&isrow));
1355     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)mat),rank == 0 ? mat->cmap->N : 0,0,1,&iscol));
1356     PetscCall(MatCreateSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&AA));
1357     if (rank == 0) {
1358        PetscCall(PetscObjectReference((PetscObject)AA[0]));
1359        A    = AA[0];
1360        Av   = AA[0];
1361     }
1362     PetscCall(MatDestroySubMatrices(1,&AA));
1363 */
1364     PetscCall(ISDestroy(&iscol));
1365     PetscCall(ISDestroy(&isrow));
1366     /*
1367        Everyone has to call to draw the matrix since the graphics waits are
1368        synchronized across all processors that share the PetscDraw object
1369     */
1370     PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
1371     if (rank == 0) {
1372       if (((PetscObject)mat)->name) PetscCall(PetscObjectSetName((PetscObject)Av, ((PetscObject)mat)->name));
1373       PetscCall(MatView_SeqAIJ(Av, sviewer));
1374     }
1375     PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
1376     PetscCall(PetscViewerFlush(viewer));
1377     PetscCall(MatDestroy(&A));
1378   }
1379   PetscFunctionReturn(PETSC_SUCCESS);
1380 }
1381 
1382 PetscErrorCode MatView_MPIAIJ(Mat mat, PetscViewer viewer)
1383 {
1384   PetscBool iascii, isdraw, issocket, isbinary;
1385 
1386   PetscFunctionBegin;
1387   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii));
1388   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
1389   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
1390   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
1391   if (iascii || isdraw || isbinary || issocket) PetscCall(MatView_MPIAIJ_ASCIIorDraworSocket(mat, viewer));
1392   PetscFunctionReturn(PETSC_SUCCESS);
1393 }
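/*
   A minimal usage sketch, added for illustration only: pushing a viewer format before MatView() selects among
   the branches of MatView_MPIAIJ_ASCIIorDraworSocket() above; A is an assumed, assembled matrix. The same
   effect is available from the command line with -mat_view ::ascii_info_detail.

     PetscCall(PetscViewerPushFormat(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_ASCII_INFO_DETAIL));
     PetscCall(MatView(A, PETSC_VIEWER_STDOUT_WORLD));
     PetscCall(PetscViewerPopFormat(PETSC_VIEWER_STDOUT_WORLD));
*/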
1394 
1395 static PetscErrorCode MatSOR_MPIAIJ(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1396 {
1397   Mat_MPIAIJ *mat = (Mat_MPIAIJ *)matin->data;
1398   Vec         bb1 = NULL;
1399   PetscBool   hasop;
1400 
1401   PetscFunctionBegin;
1402   if (flag == SOR_APPLY_UPPER) {
1403     PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1404     PetscFunctionReturn(PETSC_SUCCESS);
1405   }
1406 
1407   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));
1408 
1409   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1410     if (flag & SOR_ZERO_INITIAL_GUESS) {
1411       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1412       its--;
1413     }
1414 
1415     while (its--) {
1416       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1417       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1418 
1419       /* update rhs: bb1 = bb - B*x */
1420       PetscCall(VecScale(mat->lvec, -1.0));
1421       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1422 
1423       /* local sweep */
1424       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx));
1425     }
1426   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1427     if (flag & SOR_ZERO_INITIAL_GUESS) {
1428       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1429       its--;
1430     }
1431     while (its--) {
1432       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1433       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1434 
1435       /* update rhs: bb1 = bb - B*x */
1436       PetscCall(VecScale(mat->lvec, -1.0));
1437       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1438 
1439       /* local sweep */
1440       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx));
1441     }
1442   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1443     if (flag & SOR_ZERO_INITIAL_GUESS) {
1444       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1445       its--;
1446     }
1447     while (its--) {
1448       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1449       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1450 
1451       /* update rhs: bb1 = bb - B*x */
1452       PetscCall(VecScale(mat->lvec, -1.0));
1453       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1454 
1455       /* local sweep */
1456       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx));
1457     }
1458   } else if (flag & SOR_EISENSTAT) {
1459     Vec xx1;
1460 
1461     PetscCall(VecDuplicate(bb, &xx1));
1462     PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP), fshift, lits, 1, xx));
1463 
1464     PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1465     PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1466     if (!mat->diag) {
1467       PetscCall(MatCreateVecs(matin, &mat->diag, NULL));
1468       PetscCall(MatGetDiagonal(matin, mat->diag));
1469     }
1470     PetscCall(MatHasOperation(matin, MATOP_MULT_DIAGONAL_BLOCK, &hasop));
1471     if (hasop) {
1472       PetscCall(MatMultDiagonalBlock(matin, xx, bb1));
1473     } else {
1474       PetscCall(VecPointwiseMult(bb1, mat->diag, xx));
1475     }
1476     PetscCall(VecAYPX(bb1, (omega - 2.0) / omega, bb));
1477 
1478     PetscCall(MatMultAdd(mat->B, mat->lvec, bb1, bb1));
1479 
1480     /* local sweep */
1481     PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, (MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP), fshift, lits, 1, xx1));
1482     PetscCall(VecAXPY(xx, 1.0, xx1));
1483     PetscCall(VecDestroy(&xx1));
1484   } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");
1485 
1486   PetscCall(VecDestroy(&bb1));
1487 
1488   matin->factorerrortype = mat->A->factorerrortype;
1489   PetscFunctionReturn(PETSC_SUCCESS);
1490 }
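/*
   A hedged usage sketch, added for illustration only: the parallel SOR above supports only the SOR_LOCAL_*
   sweeps, i.e. a processor-block Gauss-Seidel in which each iteration scatters the current solution, updates
   the local right-hand side with the off-diagonal block B, and sweeps the diagonal block A. It is normally
   reached through PCSOR rather than called directly; ksp, pc, A, b and x below are assumed objects:

     PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
     PetscCall(KSPSetOperators(ksp, A, A));
     PetscCall(KSPGetPC(ksp, &pc));
     PetscCall(PCSetType(pc, PCSOR));                              // PCApply() lands in MatSOR_MPIAIJ()
     PetscCall(PCSORSetSymmetric(pc, SOR_LOCAL_SYMMETRIC_SWEEP));  // one of the sweeps supported in parallel
     PetscCall(KSPSolve(ksp, b, x));
*/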
1491 
1492 static PetscErrorCode MatPermute_MPIAIJ(Mat A, IS rowp, IS colp, Mat *B)
1493 {
1494   Mat             aA, aB, Aperm;
1495   const PetscInt *rwant, *cwant, *gcols, *ai, *bi, *aj, *bj;
1496   PetscScalar    *aa, *ba;
1497   PetscInt        i, j, m, n, ng, anz, bnz, *dnnz, *onnz, *tdnnz, *tonnz, *rdest, *cdest, *work, *gcdest;
1498   PetscSF         rowsf, sf;
1499   IS              parcolp = NULL;
1500   PetscBool       done;
1501 
1502   PetscFunctionBegin;
1503   PetscCall(MatGetLocalSize(A, &m, &n));
1504   PetscCall(ISGetIndices(rowp, &rwant));
1505   PetscCall(ISGetIndices(colp, &cwant));
1506   PetscCall(PetscMalloc3(PetscMax(m, n), &work, m, &rdest, n, &cdest));
1507 
1508   /* Invert row permutation to find out where my rows should go */
1509   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &rowsf));
1510   PetscCall(PetscSFSetGraphLayout(rowsf, A->rmap, A->rmap->n, NULL, PETSC_OWN_POINTER, rwant));
1511   PetscCall(PetscSFSetFromOptions(rowsf));
1512   for (i = 0; i < m; i++) work[i] = A->rmap->rstart + i;
1513   PetscCall(PetscSFReduceBegin(rowsf, MPIU_INT, work, rdest, MPI_REPLACE));
1514   PetscCall(PetscSFReduceEnd(rowsf, MPIU_INT, work, rdest, MPI_REPLACE));
1515 
1516   /* Invert column permutation to find out where my columns should go */
1517   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1518   PetscCall(PetscSFSetGraphLayout(sf, A->cmap, A->cmap->n, NULL, PETSC_OWN_POINTER, cwant));
1519   PetscCall(PetscSFSetFromOptions(sf));
1520   for (i = 0; i < n; i++) work[i] = A->cmap->rstart + i;
1521   PetscCall(PetscSFReduceBegin(sf, MPIU_INT, work, cdest, MPI_REPLACE));
1522   PetscCall(PetscSFReduceEnd(sf, MPIU_INT, work, cdest, MPI_REPLACE));
1523   PetscCall(PetscSFDestroy(&sf));
1524 
1525   PetscCall(ISRestoreIndices(rowp, &rwant));
1526   PetscCall(ISRestoreIndices(colp, &cwant));
1527   PetscCall(MatMPIAIJGetSeqAIJ(A, &aA, &aB, &gcols));
1528 
1529   /* Find out where my gcols should go */
1530   PetscCall(MatGetSize(aB, NULL, &ng));
1531   PetscCall(PetscMalloc1(ng, &gcdest));
1532   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1533   PetscCall(PetscSFSetGraphLayout(sf, A->cmap, ng, NULL, PETSC_OWN_POINTER, gcols));
1534   PetscCall(PetscSFSetFromOptions(sf));
1535   PetscCall(PetscSFBcastBegin(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE));
1536   PetscCall(PetscSFBcastEnd(sf, MPIU_INT, cdest, gcdest, MPI_REPLACE));
1537   PetscCall(PetscSFDestroy(&sf));
1538 
1539   PetscCall(PetscCalloc4(m, &dnnz, m, &onnz, m, &tdnnz, m, &tonnz));
1540   PetscCall(MatGetRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done));
1541   PetscCall(MatGetRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done));
1542   for (i = 0; i < m; i++) {
1543     PetscInt    row = rdest[i];
1544     PetscMPIInt rowner;
1545     PetscCall(PetscLayoutFindOwner(A->rmap, row, &rowner));
1546     for (j = ai[i]; j < ai[i + 1]; j++) {
1547       PetscInt    col = cdest[aj[j]];
1548       PetscMPIInt cowner;
1549       PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner)); /* Could build an index for the columns to eliminate this search */
1550       if (rowner == cowner) dnnz[i]++;
1551       else onnz[i]++;
1552     }
1553     for (j = bi[i]; j < bi[i + 1]; j++) {
1554       PetscInt    col = gcdest[bj[j]];
1555       PetscMPIInt cowner;
1556       PetscCall(PetscLayoutFindOwner(A->cmap, col, &cowner));
1557       if (rowner == cowner) dnnz[i]++;
1558       else onnz[i]++;
1559     }
1560   }
1561   PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE));
1562   PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, dnnz, tdnnz, MPI_REPLACE));
1563   PetscCall(PetscSFBcastBegin(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE));
1564   PetscCall(PetscSFBcastEnd(rowsf, MPIU_INT, onnz, tonnz, MPI_REPLACE));
1565   PetscCall(PetscSFDestroy(&rowsf));
1566 
1567   PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)A), A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N, 0, tdnnz, 0, tonnz, &Aperm));
1568   PetscCall(MatSeqAIJGetArray(aA, &aa));
1569   PetscCall(MatSeqAIJGetArray(aB, &ba));
1570   for (i = 0; i < m; i++) {
1571     PetscInt *acols = dnnz, *bcols = onnz; /* Repurpose now-unneeded arrays */
1572     PetscInt  j0, rowlen;
1573     rowlen = ai[i + 1] - ai[i];
1574     for (j0 = j = 0; j < rowlen; j0 = j) { /* rowlen may exceed the scratch length m, so insert the values in batches of at most m */
1575       for (; j < PetscMin(rowlen, j0 + m); j++) acols[j - j0] = cdest[aj[ai[i] + j]];
1576       PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, acols, aa + ai[i] + j0, INSERT_VALUES));
1577     }
1578     rowlen = bi[i + 1] - bi[i];
1579     for (j0 = j = 0; j < rowlen; j0 = j) {
1580       for (; j < PetscMin(rowlen, j0 + m); j++) bcols[j - j0] = gcdest[bj[bi[i] + j]];
1581       PetscCall(MatSetValues(Aperm, 1, &rdest[i], j - j0, bcols, ba + bi[i] + j0, INSERT_VALUES));
1582     }
1583   }
1584   PetscCall(MatAssemblyBegin(Aperm, MAT_FINAL_ASSEMBLY));
1585   PetscCall(MatAssemblyEnd(Aperm, MAT_FINAL_ASSEMBLY));
1586   PetscCall(MatRestoreRowIJ(aA, 0, PETSC_FALSE, PETSC_FALSE, &anz, &ai, &aj, &done));
1587   PetscCall(MatRestoreRowIJ(aB, 0, PETSC_FALSE, PETSC_FALSE, &bnz, &bi, &bj, &done));
1588   PetscCall(MatSeqAIJRestoreArray(aA, &aa));
1589   PetscCall(MatSeqAIJRestoreArray(aB, &ba));
1590   PetscCall(PetscFree4(dnnz, onnz, tdnnz, tonnz));
1591   PetscCall(PetscFree3(work, rdest, cdest));
1592   PetscCall(PetscFree(gcdest));
1593   if (parcolp) PetscCall(ISDestroy(&colp));
1594   *B = Aperm;
1595   PetscFunctionReturn(PETSC_SUCCESS);
1596 }
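/*
   A minimal sketch, added for illustration only, of calling the routine above through the public interface.
   The identity permutation built with ISCreateStride() is just a placeholder; real applications supply a
   nontrivial row/column ordering distributed to match the layout of the assumed matrix A:

     IS       rowperm, colperm;
     Mat      Aperm;
     PetscInt rstart, rend, cstart, cend;
     PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
     PetscCall(MatGetOwnershipRangeColumn(A, &cstart, &cend));
     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)A), rend - rstart, rstart, 1, &rowperm));
     PetscCall(ISCreateStride(PetscObjectComm((PetscObject)A), cend - cstart, cstart, 1, &colperm));
     PetscCall(MatPermute(A, rowperm, colperm, &Aperm));
     PetscCall(ISDestroy(&rowperm));
     PetscCall(ISDestroy(&colperm));
*/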
1597 
1598 static PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
1599 {
1600   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1601 
1602   PetscFunctionBegin;
1603   PetscCall(MatGetSize(aij->B, NULL, nghosts));
1604   if (ghosts) *ghosts = aij->garray;
1605   PetscFunctionReturn(PETSC_SUCCESS);
1606 }
1607 
1608 static PetscErrorCode MatGetInfo_MPIAIJ(Mat matin, MatInfoType flag, MatInfo *info)
1609 {
1610   Mat_MPIAIJ    *mat = (Mat_MPIAIJ *)matin->data;
1611   Mat            A = mat->A, B = mat->B;
1612   PetscLogDouble isend[5], irecv[5];
1613 
1614   PetscFunctionBegin;
1615   info->block_size = 1.0;
1616   PetscCall(MatGetInfo(A, MAT_LOCAL, info));
1617 
1618   isend[0] = info->nz_used;
1619   isend[1] = info->nz_allocated;
1620   isend[2] = info->nz_unneeded;
1621   isend[3] = info->memory;
1622   isend[4] = info->mallocs;
1623 
1624   PetscCall(MatGetInfo(B, MAT_LOCAL, info));
1625 
1626   isend[0] += info->nz_used;
1627   isend[1] += info->nz_allocated;
1628   isend[2] += info->nz_unneeded;
1629   isend[3] += info->memory;
1630   isend[4] += info->mallocs;
1631   if (flag == MAT_LOCAL) {
1632     info->nz_used      = isend[0];
1633     info->nz_allocated = isend[1];
1634     info->nz_unneeded  = isend[2];
1635     info->memory       = isend[3];
1636     info->mallocs      = isend[4];
1637   } else if (flag == MAT_GLOBAL_MAX) {
1638     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));
1639 
1640     info->nz_used      = irecv[0];
1641     info->nz_allocated = irecv[1];
1642     info->nz_unneeded  = irecv[2];
1643     info->memory       = irecv[3];
1644     info->mallocs      = irecv[4];
1645   } else if (flag == MAT_GLOBAL_SUM) {
1646     PetscCall(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));
1647 
1648     info->nz_used      = irecv[0];
1649     info->nz_allocated = irecv[1];
1650     info->nz_unneeded  = irecv[2];
1651     info->memory       = irecv[3];
1652     info->mallocs      = irecv[4];
1653   }
1654   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1655   info->fill_ratio_needed = 0;
1656   info->factor_mallocs    = 0;
1657   PetscFunctionReturn(PETSC_SUCCESS);
1658 }
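/*
   A minimal usage sketch, added for illustration only: the MAT_LOCAL numbers assembled above can be combined
   across ranks with MAT_GLOBAL_SUM or MAT_GLOBAL_MAX; the MatInfo fields are PetscLogDouble, hence %g. The
   matrix A is assumed to exist and be assembled.

     MatInfo info;
     PetscCall(MatGetInfo(A, MAT_GLOBAL_SUM, &info));
     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nz used %g, nz allocated %g, memory %g\n", info.nz_used, info.nz_allocated, info.memory));
*/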
1659 
1660 PetscErrorCode MatSetOption_MPIAIJ(Mat A, MatOption op, PetscBool flg)
1661 {
1662   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
1663 
1664   PetscFunctionBegin;
1665   switch (op) {
1666   case MAT_NEW_NONZERO_LOCATIONS:
1667   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1668   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1669   case MAT_KEEP_NONZERO_PATTERN:
1670   case MAT_NEW_NONZERO_LOCATION_ERR:
1671   case MAT_USE_INODES:
1672   case MAT_IGNORE_ZERO_ENTRIES:
1673   case MAT_FORM_EXPLICIT_TRANSPOSE:
1674     MatCheckPreallocated(A, 1);
1675     PetscCall(MatSetOption(a->A, op, flg));
1676     PetscCall(MatSetOption(a->B, op, flg));
1677     break;
1678   case MAT_ROW_ORIENTED:
1679     MatCheckPreallocated(A, 1);
1680     a->roworiented = flg;
1681 
1682     PetscCall(MatSetOption(a->A, op, flg));
1683     PetscCall(MatSetOption(a->B, op, flg));
1684     break;
1685   case MAT_FORCE_DIAGONAL_ENTRIES:
1686   case MAT_SORTED_FULL:
1687     PetscCall(PetscInfo(A, "Option %s ignored\n", MatOptions[op]));
1688     break;
1689   case MAT_IGNORE_OFF_PROC_ENTRIES:
1690     a->donotstash = flg;
1691     break;
1692   /* Symmetry flags are handled directly by MatSetOption() and they don't affect preallocation */
1693   case MAT_SPD:
1694   case MAT_SYMMETRIC:
1695   case MAT_STRUCTURALLY_SYMMETRIC:
1696   case MAT_HERMITIAN:
1697   case MAT_SYMMETRY_ETERNAL:
1698   case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
1699   case MAT_SPD_ETERNAL:
1700     /* if the diagonal block is square it inherits some of the properties above */
1701     break;
1702   case MAT_SUBMAT_SINGLEIS:
1703     A->submat_singleis = flg;
1704     break;
1705   case MAT_STRUCTURE_ONLY:
1706     /* The option is handled directly by MatSetOption() */
1707     break;
1708   default:
1709     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unknown option %d", op);
1710   }
1711   PetscFunctionReturn(PETSC_SUCCESS);
1712 }
1713 
1714 PetscErrorCode MatGetRow_MPIAIJ(Mat matin, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v)
1715 {
1716   Mat_MPIAIJ  *mat = (Mat_MPIAIJ *)matin->data;
1717   PetscScalar *vworkA, *vworkB, **pvA, **pvB, *v_p;
1718   PetscInt     i, *cworkA, *cworkB, **pcA, **pcB, cstart = matin->cmap->rstart;
1719   PetscInt     nztot, nzA, nzB, lrow, rstart = matin->rmap->rstart, rend = matin->rmap->rend;
1720   PetscInt    *cmap, *idx_p;
1721 
1722   PetscFunctionBegin;
1723   PetscCheck(!mat->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Already active");
1724   mat->getrowactive = PETSC_TRUE;
1725 
1726   if (!mat->rowvalues && (idx || v)) {
1727     /*
1728         allocate enough space to hold information from the longest row.
1729     */
1730     Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)mat->A->data, *Ba = (Mat_SeqAIJ *)mat->B->data;
1731     PetscInt    max = 1, tmp;
1732     for (i = 0; i < matin->rmap->n; i++) {
1733       tmp = Aa->i[i + 1] - Aa->i[i] + Ba->i[i + 1] - Ba->i[i];
1734       if (max < tmp) max = tmp;
1735     }
1736     PetscCall(PetscMalloc2(max, &mat->rowvalues, max, &mat->rowindices));
1737   }
1738 
1739   PetscCheck(row >= rstart && row < rend, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Only local rows");
1740   lrow = row - rstart;
1741 
1742   pvA = &vworkA;
1743   pcA = &cworkA;
1744   pvB = &vworkB;
1745   pcB = &cworkB;
1746   if (!v) {
1747     pvA = NULL;
1748     pvB = NULL;
1749   }
1750   if (!idx) {
1751     pcA = NULL;
1752     if (!v) pcB = NULL;
1753   }
1754   PetscCall((*mat->A->ops->getrow)(mat->A, lrow, &nzA, pcA, pvA));
1755   PetscCall((*mat->B->ops->getrow)(mat->B, lrow, &nzB, pcB, pvB));
1756   nztot = nzA + nzB;
1757 
1758   cmap = mat->garray;
1759   if (v || idx) {
1760     if (nztot) {
1761       /* Sort by increasing column numbers, assuming A and B already sorted */
1762       PetscInt imark = -1;
1763       if (v) {
1764         *v = v_p = mat->rowvalues;
1765         for (i = 0; i < nzB; i++) {
1766           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1767           else break;
1768         }
1769         imark = i;
1770         for (i = 0; i < nzA; i++) v_p[imark + i] = vworkA[i];
1771         for (i = imark; i < nzB; i++) v_p[nzA + i] = vworkB[i];
1772       }
1773       if (idx) {
1774         *idx = idx_p = mat->rowindices;
1775         if (imark > -1) {
1776           for (i = 0; i < imark; i++) idx_p[i] = cmap[cworkB[i]];
1777         } else {
1778           for (i = 0; i < nzB; i++) {
1779             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1780             else break;
1781           }
1782           imark = i;
1783         }
1784         for (i = 0; i < nzA; i++) idx_p[imark + i] = cstart + cworkA[i];
1785         for (i = imark; i < nzB; i++) idx_p[nzA + i] = cmap[cworkB[i]];
1786       }
1787     } else {
1788       if (idx) *idx = NULL;
1789       if (v) *v = NULL;
1790     }
1791   }
1792   *nz = nztot;
1793   PetscCall((*mat->A->ops->restorerow)(mat->A, lrow, &nzA, pcA, pvA));
1794   PetscCall((*mat->B->ops->restorerow)(mat->B, lrow, &nzB, pcB, pvB));
1795   PetscFunctionReturn(PETSC_SUCCESS);
1796 }
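/*
   A minimal usage sketch, added for illustration only: MatGetRow() on a MATMPIAIJ may only be asked for locally
   owned rows (see the PetscCheck above), returns global column indices in increasing order, and must be paired
   with MatRestoreRow() before the next row is requested. A is an assumed, assembled matrix.

     PetscInt           rstart, rend, row, ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;
     PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
     for (row = rstart; row < rend; row++) {
       PetscCall(MatGetRow(A, row, &ncols, &cols, &vals));
       // ... inspect the row ...
       PetscCall(MatRestoreRow(A, row, &ncols, &cols, &vals));
     }
*/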
1797 
1798 PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat, PetscInt row, PetscInt *nz, PetscInt **idx, PetscScalar **v)
1799 {
1800   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1801 
1802   PetscFunctionBegin;
1803   PetscCheck(aij->getrowactive, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "MatGetRow() must be called first");
1804   aij->getrowactive = PETSC_FALSE;
1805   PetscFunctionReturn(PETSC_SUCCESS);
1806 }
1807 
1808 static PetscErrorCode MatNorm_MPIAIJ(Mat mat, NormType type, PetscReal *norm)
1809 {
1810   Mat_MPIAIJ      *aij  = (Mat_MPIAIJ *)mat->data;
1811   Mat_SeqAIJ      *amat = (Mat_SeqAIJ *)aij->A->data, *bmat = (Mat_SeqAIJ *)aij->B->data;
1812   PetscInt         i, j, cstart = mat->cmap->rstart;
1813   PetscReal        sum = 0.0;
1814   const MatScalar *v, *amata, *bmata;
1815 
1816   PetscFunctionBegin;
1817   if (aij->size == 1) {
1818     PetscCall(MatNorm(aij->A, type, norm));
1819   } else {
1820     PetscCall(MatSeqAIJGetArrayRead(aij->A, &amata));
1821     PetscCall(MatSeqAIJGetArrayRead(aij->B, &bmata));
1822     if (type == NORM_FROBENIUS) {
1823       v = amata;
1824       for (i = 0; i < amat->nz; i++) {
1825         sum += PetscRealPart(PetscConj(*v) * (*v));
1826         v++;
1827       }
1828       v = bmata;
1829       for (i = 0; i < bmat->nz; i++) {
1830         sum += PetscRealPart(PetscConj(*v) * (*v));
1831         v++;
1832       }
1833       PetscCall(MPIU_Allreduce(&sum, norm, 1, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)mat)));
1834       *norm = PetscSqrtReal(*norm);
1835       PetscCall(PetscLogFlops(2.0 * amat->nz + 2.0 * bmat->nz));
1836     } else if (type == NORM_1) { /* max column norm */
1837       PetscReal *tmp, *tmp2;
1838       PetscInt  *jj, *garray = aij->garray;
1839       PetscCall(PetscCalloc1(mat->cmap->N + 1, &tmp));
1840       PetscCall(PetscMalloc1(mat->cmap->N + 1, &tmp2));
1841       *norm = 0.0;
1842       v     = amata;
1843       jj    = amat->j;
1844       for (j = 0; j < amat->nz; j++) {
1845         tmp[cstart + *jj++] += PetscAbsScalar(*v);
1846         v++;
1847       }
1848       v  = bmata;
1849       jj = bmat->j;
1850       for (j = 0; j < bmat->nz; j++) {
1851         tmp[garray[*jj++]] += PetscAbsScalar(*v);
1852         v++;
1853       }
1854       PetscCall(MPIU_Allreduce(tmp, tmp2, mat->cmap->N, MPIU_REAL, MPIU_SUM, PetscObjectComm((PetscObject)mat)));
1855       for (j = 0; j < mat->cmap->N; j++) {
1856         if (tmp2[j] > *norm) *norm = tmp2[j];
1857       }
1858       PetscCall(PetscFree(tmp));
1859       PetscCall(PetscFree(tmp2));
1860       PetscCall(PetscLogFlops(PetscMax(amat->nz + bmat->nz - 1, 0)));
1861     } else if (type == NORM_INFINITY) { /* max row norm */
1862       PetscReal ntemp = 0.0;
1863       for (j = 0; j < aij->A->rmap->n; j++) {
1864         v   = amata + amat->i[j];
1865         sum = 0.0;
1866         for (i = 0; i < amat->i[j + 1] - amat->i[j]; i++) {
1867           sum += PetscAbsScalar(*v);
1868           v++;
1869         }
1870         v = bmata + bmat->i[j];
1871         for (i = 0; i < bmat->i[j + 1] - bmat->i[j]; i++) {
1872           sum += PetscAbsScalar(*v);
1873           v++;
1874         }
1875         if (sum > ntemp) ntemp = sum;
1876       }
1877       PetscCall(MPIU_Allreduce(&ntemp, norm, 1, MPIU_REAL, MPIU_MAX, PetscObjectComm((PetscObject)mat)));
1878       PetscCall(PetscLogFlops(PetscMax(amat->nz + bmat->nz - 1, 0)));
1879     } else SETERRQ(PetscObjectComm((PetscObject)mat), PETSC_ERR_SUP, "No support for two norm");
1880     PetscCall(MatSeqAIJRestoreArrayRead(aij->A, &amata));
1881     PetscCall(MatSeqAIJRestoreArrayRead(aij->B, &bmata));
1882   }
1883   PetscFunctionReturn(PETSC_SUCCESS);
1884 }
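/*
   For reference (an added note, stated from the standard definitions), the quantities computed above are

     NORM_FROBENIUS: ||A||_F   = sqrt( sum_ij |a_ij|^2 )
     NORM_1:         ||A||_1   = max_j sum_i |a_ij|      (largest column sum)
     NORM_INFINITY:  ||A||_inf = max_i sum_j |a_ij|      (largest row sum)

   with the column sums accumulated into a global-length array and summed across ranks, and the per-rank row
   maxima reduced with a max reduction.
*/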
1885 
1886 static PetscErrorCode MatTranspose_MPIAIJ(Mat A, MatReuse reuse, Mat *matout)
1887 {
1888   Mat_MPIAIJ      *a    = (Mat_MPIAIJ *)A->data, *b;
1889   Mat_SeqAIJ      *Aloc = (Mat_SeqAIJ *)a->A->data, *Bloc = (Mat_SeqAIJ *)a->B->data, *sub_B_diag;
1890   PetscInt         M = A->rmap->N, N = A->cmap->N, ma, na, mb, nb, row, *cols, *cols_tmp, *B_diag_ilen, i, ncol, A_diag_ncol;
1891   const PetscInt  *ai, *aj, *bi, *bj, *B_diag_i;
1892   Mat              B, A_diag, *B_diag;
1893   const MatScalar *pbv, *bv;
1894 
1895   PetscFunctionBegin;
1896   if (reuse == MAT_REUSE_MATRIX) PetscCall(MatTransposeCheckNonzeroState_Private(A, *matout));
1897   ma = A->rmap->n;
1898   na = A->cmap->n;
1899   mb = a->B->rmap->n;
1900   nb = a->B->cmap->n;
1901   ai = Aloc->i;
1902   aj = Aloc->j;
1903   bi = Bloc->i;
1904   bj = Bloc->j;
1905   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1906     PetscInt            *d_nnz, *g_nnz, *o_nnz;
1907     PetscSFNode         *oloc;
1908     PETSC_UNUSED PetscSF sf;
1909 
1910     PetscCall(PetscMalloc4(na, &d_nnz, na, &o_nnz, nb, &g_nnz, nb, &oloc));
1911     /* compute d_nnz for preallocation */
1912     PetscCall(PetscArrayzero(d_nnz, na));
1913     for (i = 0; i < ai[ma]; i++) d_nnz[aj[i]]++;
1914     /* compute local off-diagonal contributions */
1915     PetscCall(PetscArrayzero(g_nnz, nb));
1916     for (i = 0; i < bi[ma]; i++) g_nnz[bj[i]]++;
1917     /* map those to global */
1918     PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)A), &sf));
1919     PetscCall(PetscSFSetGraphLayout(sf, A->cmap, nb, NULL, PETSC_USE_POINTER, a->garray));
1920     PetscCall(PetscSFSetFromOptions(sf));
1921     PetscCall(PetscArrayzero(o_nnz, na));
1922     PetscCall(PetscSFReduceBegin(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM));
1923     PetscCall(PetscSFReduceEnd(sf, MPIU_INT, g_nnz, o_nnz, MPI_SUM));
1924     PetscCall(PetscSFDestroy(&sf));
1925 
1926     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1927     PetscCall(MatSetSizes(B, A->cmap->n, A->rmap->n, N, M));
1928     PetscCall(MatSetBlockSizes(B, PetscAbs(A->cmap->bs), PetscAbs(A->rmap->bs)));
1929     PetscCall(MatSetType(B, ((PetscObject)A)->type_name));
1930     PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
1931     PetscCall(PetscFree4(d_nnz, o_nnz, g_nnz, oloc));
1932   } else {
1933     B = *matout;
1934     PetscCall(MatSetOption(B, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE));
1935   }
1936 
1937   b           = (Mat_MPIAIJ *)B->data;
1938   A_diag      = a->A;
1939   B_diag      = &b->A;
1940   sub_B_diag  = (Mat_SeqAIJ *)(*B_diag)->data;
1941   A_diag_ncol = A_diag->cmap->N;
1942   B_diag_ilen = sub_B_diag->ilen;
1943   B_diag_i    = sub_B_diag->i;
1944 
1945   /* Set ilen for diagonal of B */
1946   for (i = 0; i < A_diag_ncol; i++) B_diag_ilen[i] = B_diag_i[i + 1] - B_diag_i[i];
1947 
1948   /* Transpose the diagonal part of the matrix. In contrast to the off-diagonal part, this can be done
1949   very quickly (without using MatSetValues()) because all writes are local. */
1950   PetscCall(MatTransposeSetPrecursor(A_diag, *B_diag));
1951   PetscCall(MatTranspose(A_diag, MAT_REUSE_MATRIX, B_diag));
1952 
1953   /* copy over the B part */
1954   PetscCall(PetscMalloc1(bi[mb], &cols));
1955   PetscCall(MatSeqAIJGetArrayRead(a->B, &bv));
1956   pbv = bv;
1957   row = A->rmap->rstart;
1958   for (i = 0; i < bi[mb]; i++) cols[i] = a->garray[bj[i]];
1959   cols_tmp = cols;
1960   for (i = 0; i < mb; i++) {
1961     ncol = bi[i + 1] - bi[i];
1962     PetscCall(MatSetValues(B, ncol, cols_tmp, 1, &row, pbv, INSERT_VALUES));
1963     row++;
1964     if (pbv) pbv += ncol;
1965     if (cols_tmp) cols_tmp += ncol;
1966   }
1967   PetscCall(PetscFree(cols));
1968   PetscCall(MatSeqAIJRestoreArrayRead(a->B, &bv));
1969 
1970   PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1971   PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1972   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1973     *matout = B;
1974   } else {
1975     PetscCall(MatHeaderMerge(A, &B));
1976   }
1977   PetscFunctionReturn(PETSC_SUCCESS);
1978 }
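/*
   A minimal usage sketch, added for illustration only, of reaching the routine above through the public
   interface; A is an assumed, assembled MATMPIAIJ matrix:

     Mat At;
     PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));  // allocates At and fills it
     // ... change the values of A without changing its nonzero pattern ...
     PetscCall(MatTranspose(A, MAT_REUSE_MATRIX, &At));    // reuses the structure of At
*/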
1979 
1980 static PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat, Vec ll, Vec rr)
1981 {
1982   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
1983   Mat         a = aij->A, b = aij->B;
1984   PetscInt    s1, s2, s3;
1985 
1986   PetscFunctionBegin;
1987   PetscCall(MatGetLocalSize(mat, &s2, &s3));
1988   if (rr) {
1989     PetscCall(VecGetLocalSize(rr, &s1));
1990     PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
1991     /* Overlap communication with computation. */
1992     PetscCall(VecScatterBegin(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD));
1993   }
1994   if (ll) {
1995     PetscCall(VecGetLocalSize(ll, &s1));
1996     PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
1997     PetscUseTypeMethod(b, diagonalscale, ll, NULL);
1998   }
1999   /* scale the diagonal block */
2000   PetscUseTypeMethod(a, diagonalscale, ll, rr);
2001 
2002   if (rr) {
2003     /* Do a scatter end and then right scale the off-diagonal block */
2004     PetscCall(VecScatterEnd(aij->Mvctx, rr, aij->lvec, INSERT_VALUES, SCATTER_FORWARD));
2005     PetscUseTypeMethod(b, diagonalscale, NULL, aij->lvec);
2006   }
2007   PetscFunctionReturn(PETSC_SUCCESS);
2008 }
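/*
   An added note for clarity: the routine above computes A <- diag(ll) A diag(rr), where ll is distributed like
   the rows of A and rr like its columns (MatDiagonalScale(A, ll, rr) in the public interface). The scatter of
   rr into lvec supplies the off-process entries needed to right-scale the off-diagonal block B, which stores
   compressed local column indices.
*/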
2009 
2010 static PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2011 {
2012   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2013 
2014   PetscFunctionBegin;
2015   PetscCall(MatSetUnfactored(a->A));
2016   PetscFunctionReturn(PETSC_SUCCESS);
2017 }
2018 
2019 static PetscErrorCode MatEqual_MPIAIJ(Mat A, Mat B, PetscBool *flag)
2020 {
2021   Mat_MPIAIJ *matB = (Mat_MPIAIJ *)B->data, *matA = (Mat_MPIAIJ *)A->data;
2022   Mat         a, b, c, d;
2023   PetscBool   flg;
2024 
2025   PetscFunctionBegin;
2026   a = matA->A;
2027   b = matA->B;
2028   c = matB->A;
2029   d = matB->B;
2030 
2031   PetscCall(MatEqual(a, c, &flg));
2032   if (flg) PetscCall(MatEqual(b, d, &flg));
2033   PetscCall(MPIU_Allreduce(&flg, flag, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
2034   PetscFunctionReturn(PETSC_SUCCESS);
2035 }
2036 
2037 static PetscErrorCode MatCopy_MPIAIJ(Mat A, Mat B, MatStructure str)
2038 {
2039   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2040   Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2041 
2042   PetscFunctionBegin;
2043   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2044   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2045     /* because of the column compression in the off-processor part of the matrix a->B,
2046        the number of columns in a->B and b->B may be different, hence we cannot call
2047        the MatCopy() directly on the two parts. If need be, we can provide a more
2048        efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices
2049        then copying the submatrices */
2050     PetscCall(MatCopy_Basic(A, B, str));
2051   } else {
2052     PetscCall(MatCopy(a->A, b->A, str));
2053     PetscCall(MatCopy(a->B, b->B, str));
2054   }
2055   PetscCall(PetscObjectStateIncrease((PetscObject)B));
2056   PetscFunctionReturn(PETSC_SUCCESS);
2057 }
2058 
2059 /*
2060    Computes the number of nonzeros per row needed for preallocation when X and Y
2061    have different nonzero structure.
2062 */
2063 PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m, const PetscInt *xi, const PetscInt *xj, const PetscInt *xltog, const PetscInt *yi, const PetscInt *yj, const PetscInt *yltog, PetscInt *nnz)
2064 {
2065   PetscInt i, j, k, nzx, nzy;
2066 
2067   PetscFunctionBegin;
2068   /* Set the number of nonzeros in the new matrix */
2069   for (i = 0; i < m; i++) {
2070     const PetscInt *xjj = xj + xi[i], *yjj = yj + yi[i];
2071     nzx    = xi[i + 1] - xi[i];
2072     nzy    = yi[i + 1] - yi[i];
2073     nnz[i] = 0;
2074     for (j = 0, k = 0; j < nzx; j++) {                                /* Point in X */
2075       for (; k < nzy && yltog[yjj[k]] < xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2076       if (k < nzy && yltog[yjj[k]] == xltog[xjj[j]]) k++;             /* Skip duplicate */
2077       nnz[i]++;
2078     }
2079     for (; k < nzy; k++) nnz[i]++;
2080   }
2081   PetscFunctionReturn(PETSC_SUCCESS);
2082 }
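/*
   A small worked example, added for clarity, of the merge count above for one row using global column indices:
   if X has columns {0, 3, 7} and Y has columns {3, 5}, the merge visits 0, 3 (shared, counted once), 5, 7,
   so nnz[i] = 4.
*/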
2083 
2084 /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2085 static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y, const PetscInt *yltog, Mat X, const PetscInt *xltog, PetscInt *nnz)
2086 {
2087   PetscInt    m = Y->rmap->N;
2088   Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data;
2089   Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data;
2090 
2091   PetscFunctionBegin;
2092   PetscCall(MatAXPYGetPreallocation_MPIX_private(m, x->i, x->j, xltog, y->i, y->j, yltog, nnz));
2093   PetscFunctionReturn(PETSC_SUCCESS);
2094 }
2095 
2096 static PetscErrorCode MatAXPY_MPIAIJ(Mat Y, PetscScalar a, Mat X, MatStructure str)
2097 {
2098   Mat_MPIAIJ *xx = (Mat_MPIAIJ *)X->data, *yy = (Mat_MPIAIJ *)Y->data;
2099 
2100   PetscFunctionBegin;
2101   if (str == SAME_NONZERO_PATTERN) {
2102     PetscCall(MatAXPY(yy->A, a, xx->A, str));
2103     PetscCall(MatAXPY(yy->B, a, xx->B, str));
2104   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2105     PetscCall(MatAXPY_Basic(Y, a, X, str));
2106   } else {
2107     Mat       B;
2108     PetscInt *nnz_d, *nnz_o;
2109 
2110     PetscCall(PetscMalloc1(yy->A->rmap->N, &nnz_d));
2111     PetscCall(PetscMalloc1(yy->B->rmap->N, &nnz_o));
2112     PetscCall(MatCreate(PetscObjectComm((PetscObject)Y), &B));
2113     PetscCall(PetscObjectSetName((PetscObject)B, ((PetscObject)Y)->name));
2114     PetscCall(MatSetLayouts(B, Y->rmap, Y->cmap));
2115     PetscCall(MatSetType(B, ((PetscObject)Y)->type_name));
2116     PetscCall(MatAXPYGetPreallocation_SeqAIJ(yy->A, xx->A, nnz_d));
2117     PetscCall(MatAXPYGetPreallocation_MPIAIJ(yy->B, yy->garray, xx->B, xx->garray, nnz_o));
2118     PetscCall(MatMPIAIJSetPreallocation(B, 0, nnz_d, 0, nnz_o));
2119     PetscCall(MatAXPY_BasicWithPreallocation(B, Y, a, X, str));
2120     PetscCall(MatHeaderMerge(Y, &B));
2121     PetscCall(PetscFree(nnz_d));
2122     PetscCall(PetscFree(nnz_o));
2123   }
2124   PetscFunctionReturn(PETSC_SUCCESS);
2125 }
2126 
2127 PETSC_INTERN PetscErrorCode MatConjugate_SeqAIJ(Mat);
2128 
2129 static PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
2130 {
2131   PetscFunctionBegin;
2132   if (PetscDefined(USE_COMPLEX)) {
2133     Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2134 
2135     PetscCall(MatConjugate_SeqAIJ(aij->A));
2136     PetscCall(MatConjugate_SeqAIJ(aij->B));
2137   }
2138   PetscFunctionReturn(PETSC_SUCCESS);
2139 }
2140 
2141 static PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2142 {
2143   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2144 
2145   PetscFunctionBegin;
2146   PetscCall(MatRealPart(a->A));
2147   PetscCall(MatRealPart(a->B));
2148   PetscFunctionReturn(PETSC_SUCCESS);
2149 }
2150 
2151 static PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2152 {
2153   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2154 
2155   PetscFunctionBegin;
2156   PetscCall(MatImaginaryPart(a->A));
2157   PetscCall(MatImaginaryPart(a->B));
2158   PetscFunctionReturn(PETSC_SUCCESS);
2159 }
2160 
2161 static PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2162 {
2163   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data;
2164   PetscInt           i, *idxb = NULL, m = A->rmap->n;
2165   PetscScalar       *va, *vv;
2166   Vec                vB, vA;
2167   const PetscScalar *vb;
2168 
2169   PetscFunctionBegin;
2170   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &vA));
2171   PetscCall(MatGetRowMaxAbs(a->A, vA, idx));
2172 
2173   PetscCall(VecGetArrayWrite(vA, &va));
2174   if (idx) {
2175     for (i = 0; i < m; i++) {
2176       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2177     }
2178   }
2179 
2180   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &vB));
2181   PetscCall(PetscMalloc1(m, &idxb));
2182   PetscCall(MatGetRowMaxAbs(a->B, vB, idxb));
2183 
2184   PetscCall(VecGetArrayWrite(v, &vv));
2185   PetscCall(VecGetArrayRead(vB, &vb));
2186   for (i = 0; i < m; i++) {
2187     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2188       vv[i] = vb[i];
2189       if (idx) idx[i] = a->garray[idxb[i]];
2190     } else {
2191       vv[i] = va[i];
2192       if (idx && PetscAbsScalar(va[i]) == PetscAbsScalar(vb[i]) && idxb[i] != -1 && idx[i] > a->garray[idxb[i]]) idx[i] = a->garray[idxb[i]];
2193     }
2194   }
2195   PetscCall(VecRestoreArrayWrite(v, &vv));
2196   PetscCall(VecRestoreArrayWrite(vA, &va));
2197   PetscCall(VecRestoreArrayRead(vB, &vb));
2198   PetscCall(PetscFree(idxb));
2199   PetscCall(VecDestroy(&vA));
2200   PetscCall(VecDestroy(&vB));
2201   PetscFunctionReturn(PETSC_SUCCESS);
2202 }
2203 
2204 static PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2205 {
2206   Mat_MPIAIJ        *mat = (Mat_MPIAIJ *)A->data;
2207   PetscInt           m = A->rmap->n, n = A->cmap->n;
2208   PetscInt           cstart = A->cmap->rstart, cend = A->cmap->rend;
2209   PetscInt          *cmap = mat->garray;
2210   PetscInt          *diagIdx, *offdiagIdx;
2211   Vec                diagV, offdiagV;
2212   PetscScalar       *a, *diagA, *offdiagA;
2213   const PetscScalar *ba, *bav;
2214   PetscInt           r, j, col, ncols, *bi, *bj;
2215   Mat                B = mat->B;
2216   Mat_SeqAIJ        *b = (Mat_SeqAIJ *)B->data;
2217 
2218   PetscFunctionBegin;
2219   /* When one process holds the entire matrix and the other processes have no entries */
2220   if (A->cmap->N == n) {
2221     PetscCall(VecGetArrayWrite(v, &diagA));
2222     PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2223     PetscCall(MatGetRowMinAbs(mat->A, diagV, idx));
2224     PetscCall(VecDestroy(&diagV));
2225     PetscCall(VecRestoreArrayWrite(v, &diagA));
2226     PetscFunctionReturn(PETSC_SUCCESS);
2227   } else if (n == 0) {
2228     if (m) {
2229       PetscCall(VecGetArrayWrite(v, &a));
2230       for (r = 0; r < m; r++) {
2231         a[r] = 0.0;
2232         if (idx) idx[r] = -1;
2233       }
2234       PetscCall(VecRestoreArrayWrite(v, &a));
2235     }
2236     PetscFunctionReturn(PETSC_SUCCESS);
2237   }
2238 
2239   PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx));
2240   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2241   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2242   PetscCall(MatGetRowMinAbs(mat->A, diagV, diagIdx));
2243 
2244   /* Get offdiagIdx[] for implicit 0.0 */
2245   PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2246   ba = bav;
2247   bi = b->i;
2248   bj = b->j;
2249   PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2250   for (r = 0; r < m; r++) {
2251     ncols = bi[r + 1] - bi[r];
2252     if (ncols == A->cmap->N - n) { /* Brow is dense */
2253       offdiagA[r]   = *ba;
2254       offdiagIdx[r] = cmap[0];
2255     } else { /* Brow is sparse, so the implicit zeros mean the smallest magnitude in this row of B is 0.0 */
2256       offdiagA[r] = 0.0;
2257 
2258       /* Find first hole in the cmap */
2259       for (j = 0; j < ncols; j++) {
2260         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2261         if (col > j && j < cstart) {
2262           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2263           break;
2264         } else if (col > j + n && j >= cstart) {
2265           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2266           break;
2267         }
2268       }
2269       if (j == ncols && ncols < A->cmap->N - n) {
2270         /* a hole is outside compressed Bcols */
2271         if (ncols == 0) {
2272           if (cstart) {
2273             offdiagIdx[r] = 0;
2274           } else offdiagIdx[r] = cend;
2275         } else { /* ncols > 0 */
2276           offdiagIdx[r] = cmap[ncols - 1] + 1;
2277           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2278         }
2279       }
2280     }
2281 
2282     for (j = 0; j < ncols; j++) {
2283       if (PetscAbsScalar(offdiagA[r]) > PetscAbsScalar(*ba)) {
2284         offdiagA[r]   = *ba;
2285         offdiagIdx[r] = cmap[*bj];
2286       }
2287       ba++;
2288       bj++;
2289     }
2290   }
2291 
2292   PetscCall(VecGetArrayWrite(v, &a));
2293   PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2294   for (r = 0; r < m; ++r) {
2295     if (PetscAbsScalar(diagA[r]) < PetscAbsScalar(offdiagA[r])) {
2296       a[r] = diagA[r];
2297       if (idx) idx[r] = cstart + diagIdx[r];
2298     } else if (PetscAbsScalar(diagA[r]) == PetscAbsScalar(offdiagA[r])) {
2299       a[r] = diagA[r];
2300       if (idx) {
2301         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2302           idx[r] = cstart + diagIdx[r];
2303         } else idx[r] = offdiagIdx[r];
2304       }
2305     } else {
2306       a[r] = offdiagA[r];
2307       if (idx) idx[r] = offdiagIdx[r];
2308     }
2309   }
2310   PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2311   PetscCall(VecRestoreArrayWrite(v, &a));
2312   PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2313   PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2314   PetscCall(VecDestroy(&diagV));
2315   PetscCall(VecDestroy(&offdiagV));
2316   PetscCall(PetscFree2(diagIdx, offdiagIdx));
2317   PetscFunctionReturn(PETSC_SUCCESS);
2318 }
2319 
2320 static PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2321 {
2322   Mat_MPIAIJ        *mat = (Mat_MPIAIJ *)A->data;
2323   PetscInt           m = A->rmap->n, n = A->cmap->n;
2324   PetscInt           cstart = A->cmap->rstart, cend = A->cmap->rend;
2325   PetscInt          *cmap = mat->garray;
2326   PetscInt          *diagIdx, *offdiagIdx;
2327   Vec                diagV, offdiagV;
2328   PetscScalar       *a, *diagA, *offdiagA;
2329   const PetscScalar *ba, *bav;
2330   PetscInt           r, j, col, ncols, *bi, *bj;
2331   Mat                B = mat->B;
2332   Mat_SeqAIJ        *b = (Mat_SeqAIJ *)B->data;
2333 
2334   PetscFunctionBegin;
2335   /* When one process holds the entire matrix and the other processes have no entries */
2336   if (A->cmap->N == n) {
2337     PetscCall(VecGetArrayWrite(v, &diagA));
2338     PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2339     PetscCall(MatGetRowMin(mat->A, diagV, idx));
2340     PetscCall(VecDestroy(&diagV));
2341     PetscCall(VecRestoreArrayWrite(v, &diagA));
2342     PetscFunctionReturn(PETSC_SUCCESS);
2343   } else if (n == 0) {
2344     if (m) {
2345       PetscCall(VecGetArrayWrite(v, &a));
2346       for (r = 0; r < m; r++) {
2347         a[r] = PETSC_MAX_REAL;
2348         if (idx) idx[r] = -1;
2349       }
2350       PetscCall(VecRestoreArrayWrite(v, &a));
2351     }
2352     PetscFunctionReturn(PETSC_SUCCESS);
2353   }
2354 
2355   PetscCall(PetscCalloc2(m, &diagIdx, m, &offdiagIdx));
2356   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2357   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2358   PetscCall(MatGetRowMin(mat->A, diagV, diagIdx));
2359 
2360   /* Get offdiagIdx[] for implicit 0.0 */
2361   PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2362   ba = bav;
2363   bi = b->i;
2364   bj = b->j;
2365   PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2366   for (r = 0; r < m; r++) {
2367     ncols = bi[r + 1] - bi[r];
2368     if (ncols == A->cmap->N - n) { /* Brow is dense */
2369       offdiagA[r]   = *ba;
2370       offdiagIdx[r] = cmap[0];
2371     } else { /* Brow is sparse, so the implicit zeros mean the minimum in this row of B is at most 0.0 */
2372       offdiagA[r] = 0.0;
2373 
2374       /* Find first hole in the cmap */
2375       for (j = 0; j < ncols; j++) {
2376         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2377         if (col > j && j < cstart) {
2378           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2379           break;
2380         } else if (col > j + n && j >= cstart) {
2381           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2382           break;
2383         }
2384       }
2385       if (j == ncols && ncols < A->cmap->N - n) {
2386         /* a hole is outside compressed Bcols */
2387         if (ncols == 0) {
2388           if (cstart) {
2389             offdiagIdx[r] = 0;
2390           } else offdiagIdx[r] = cend;
2391         } else { /* ncols > 0 */
2392           offdiagIdx[r] = cmap[ncols - 1] + 1;
2393           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2394         }
2395       }
2396     }
2397 
2398     for (j = 0; j < ncols; j++) {
2399       if (PetscRealPart(offdiagA[r]) > PetscRealPart(*ba)) {
2400         offdiagA[r]   = *ba;
2401         offdiagIdx[r] = cmap[*bj];
2402       }
2403       ba++;
2404       bj++;
2405     }
2406   }
2407 
2408   PetscCall(VecGetArrayWrite(v, &a));
2409   PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2410   for (r = 0; r < m; ++r) {
2411     if (PetscRealPart(diagA[r]) < PetscRealPart(offdiagA[r])) {
2412       a[r] = diagA[r];
2413       if (idx) idx[r] = cstart + diagIdx[r];
2414     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2415       a[r] = diagA[r];
2416       if (idx) {
2417         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2418           idx[r] = cstart + diagIdx[r];
2419         } else idx[r] = offdiagIdx[r];
2420       }
2421     } else {
2422       a[r] = offdiagA[r];
2423       if (idx) idx[r] = offdiagIdx[r];
2424     }
2425   }
2426   PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2427   PetscCall(VecRestoreArrayWrite(v, &a));
2428   PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2429   PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2430   PetscCall(VecDestroy(&diagV));
2431   PetscCall(VecDestroy(&offdiagV));
2432   PetscCall(PetscFree2(diagIdx, offdiagIdx));
2433   PetscFunctionReturn(PETSC_SUCCESS);
2434 }
2435 
2436 static PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2437 {
2438   Mat_MPIAIJ        *mat = (Mat_MPIAIJ *)A->data;
2439   PetscInt           m = A->rmap->n, n = A->cmap->n;
2440   PetscInt           cstart = A->cmap->rstart, cend = A->cmap->rend;
2441   PetscInt          *cmap = mat->garray;
2442   PetscInt          *diagIdx, *offdiagIdx;
2443   Vec                diagV, offdiagV;
2444   PetscScalar       *a, *diagA, *offdiagA;
2445   const PetscScalar *ba, *bav;
2446   PetscInt           r, j, col, ncols, *bi, *bj;
2447   Mat                B = mat->B;
2448   Mat_SeqAIJ        *b = (Mat_SeqAIJ *)B->data;
2449 
2450   PetscFunctionBegin;
2451   /* When one process holds the entire matrix and the other processes have no entries */
2452   if (A->cmap->N == n) {
2453     PetscCall(VecGetArrayWrite(v, &diagA));
2454     PetscCall(VecCreateSeqWithArray(PETSC_COMM_SELF, 1, m, diagA, &diagV));
2455     PetscCall(MatGetRowMax(mat->A, diagV, idx));
2456     PetscCall(VecDestroy(&diagV));
2457     PetscCall(VecRestoreArrayWrite(v, &diagA));
2458     PetscFunctionReturn(PETSC_SUCCESS);
2459   } else if (n == 0) {
2460     if (m) {
2461       PetscCall(VecGetArrayWrite(v, &a));
2462       for (r = 0; r < m; r++) {
2463         a[r] = PETSC_MIN_REAL;
2464         if (idx) idx[r] = -1;
2465       }
2466       PetscCall(VecRestoreArrayWrite(v, &a));
2467     }
2468     PetscFunctionReturn(PETSC_SUCCESS);
2469   }
2470 
2471   PetscCall(PetscMalloc2(m, &diagIdx, m, &offdiagIdx));
2472   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &diagV));
2473   PetscCall(VecCreateSeq(PETSC_COMM_SELF, m, &offdiagV));
2474   PetscCall(MatGetRowMax(mat->A, diagV, diagIdx));
2475 
2476   /* Get offdiagIdx[] for implicit 0.0 */
2477   PetscCall(MatSeqAIJGetArrayRead(B, &bav));
2478   ba = bav;
2479   bi = b->i;
2480   bj = b->j;
2481   PetscCall(VecGetArrayWrite(offdiagV, &offdiagA));
2482   for (r = 0; r < m; r++) {
2483     ncols = bi[r + 1] - bi[r];
2484     if (ncols == A->cmap->N - n) { /* Brow is dense */
2485       offdiagA[r]   = *ba;
2486       offdiagIdx[r] = cmap[0];
2487     } else { /* Brow is sparse, so the implicit zeros mean the maximum in this row of B is at least 0.0 */
2488       offdiagA[r] = 0.0;
2489 
2490       /* Find first hole in the cmap */
2491       for (j = 0; j < ncols; j++) {
2492         col = cmap[bj[j]]; /* global column number = cmap[B column number] */
2493         if (col > j && j < cstart) {
2494           offdiagIdx[r] = j; /* global column number of first implicit 0.0 */
2495           break;
2496         } else if (col > j + n && j >= cstart) {
2497           offdiagIdx[r] = j + n; /* global column number of first implicit 0.0 */
2498           break;
2499         }
2500       }
2501       if (j == ncols && ncols < A->cmap->N - n) {
2502         /* a hole is outside compressed Bcols */
2503         if (ncols == 0) {
2504           if (cstart) {
2505             offdiagIdx[r] = 0;
2506           } else offdiagIdx[r] = cend;
2507         } else { /* ncols > 0 */
2508           offdiagIdx[r] = cmap[ncols - 1] + 1;
2509           if (offdiagIdx[r] == cstart) offdiagIdx[r] += n;
2510         }
2511       }
2512     }
2513 
2514     for (j = 0; j < ncols; j++) {
2515       if (PetscRealPart(offdiagA[r]) < PetscRealPart(*ba)) {
2516         offdiagA[r]   = *ba;
2517         offdiagIdx[r] = cmap[*bj];
2518       }
2519       ba++;
2520       bj++;
2521     }
2522   }
2523 
2524   PetscCall(VecGetArrayWrite(v, &a));
2525   PetscCall(VecGetArrayRead(diagV, (const PetscScalar **)&diagA));
2526   for (r = 0; r < m; ++r) {
2527     if (PetscRealPart(diagA[r]) > PetscRealPart(offdiagA[r])) {
2528       a[r] = diagA[r];
2529       if (idx) idx[r] = cstart + diagIdx[r];
2530     } else if (PetscRealPart(diagA[r]) == PetscRealPart(offdiagA[r])) {
2531       a[r] = diagA[r];
2532       if (idx) {
2533         if (cstart + diagIdx[r] <= offdiagIdx[r]) {
2534           idx[r] = cstart + diagIdx[r];
2535         } else idx[r] = offdiagIdx[r];
2536       }
2537     } else {
2538       a[r] = offdiagA[r];
2539       if (idx) idx[r] = offdiagIdx[r];
2540     }
2541   }
2542   PetscCall(MatSeqAIJRestoreArrayRead(B, &bav));
2543   PetscCall(VecRestoreArrayWrite(v, &a));
2544   PetscCall(VecRestoreArrayRead(diagV, (const PetscScalar **)&diagA));
2545   PetscCall(VecRestoreArrayWrite(offdiagV, &offdiagA));
2546   PetscCall(VecDestroy(&diagV));
2547   PetscCall(VecDestroy(&offdiagV));
2548   PetscCall(PetscFree2(diagIdx, offdiagIdx));
2549   PetscFunctionReturn(PETSC_SUCCESS);
2550 }
2551 
2552 PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat, Mat *newmat)
2553 {
2554   Mat *dummy;
2555 
2556   PetscFunctionBegin;
2557   PetscCall(MatCreateSubMatrix_MPIAIJ_All(mat, MAT_DO_NOT_GET_VALUES, MAT_INITIAL_MATRIX, &dummy));
2558   *newmat = *dummy;
2559   PetscCall(PetscFree(dummy));
2560   PetscFunctionReturn(PETSC_SUCCESS);
2561 }
2562 
2563 static PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A, const PetscScalar **values)
2564 {
2565   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2566 
2567   PetscFunctionBegin;
2568   PetscCall(MatInvertBlockDiagonal(a->A, values));
2569   A->factorerrortype = a->A->factorerrortype;
2570   PetscFunctionReturn(PETSC_SUCCESS);
2571 }
2572 
2573 static PetscErrorCode MatSetRandom_MPIAIJ(Mat x, PetscRandom rctx)
2574 {
2575   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)x->data;
2576 
2577   PetscFunctionBegin;
2578   PetscCheck(x->assembled || x->preallocated, PetscObjectComm((PetscObject)x), PETSC_ERR_ARG_WRONGSTATE, "MatSetRandom on an unassembled and unpreallocated MATMPIAIJ is not allowed");
2579   PetscCall(MatSetRandom(aij->A, rctx));
2580   if (x->assembled) {
2581     PetscCall(MatSetRandom(aij->B, rctx));
2582   } else {
2583     PetscCall(MatSetRandomSkipColumnRange_SeqAIJ_Private(aij->B, x->cmap->rstart, x->cmap->rend, rctx));
2584   }
2585   PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
2586   PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
2587   PetscFunctionReturn(PETSC_SUCCESS);
2588 }
2589 
2590 static PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A, PetscBool sc)
2591 {
2592   PetscFunctionBegin;
2593   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2594   else A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ;
2595   PetscFunctionReturn(PETSC_SUCCESS);
2596 }
2597 
2598 /*@
2599   MatMPIAIJGetNumberNonzeros - gets the number of nonzeros in the matrix on this MPI rank
2600 
2601   Not Collective
2602 
2603   Input Parameter:
2604 . A - the matrix
2605 
2606   Output Parameter:
2607 . nz - the number of nonzeros
2608 
2609   Level: advanced
2610 
2611 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`
2612 @*/
2613 PetscErrorCode MatMPIAIJGetNumberNonzeros(Mat A, PetscCount *nz)
2614 {
2615   Mat_MPIAIJ *maij = (Mat_MPIAIJ *)A->data;
2616   Mat_SeqAIJ *aaij = (Mat_SeqAIJ *)maij->A->data, *baij = (Mat_SeqAIJ *)maij->B->data;
2617   PetscBool   isaij;
2618 
2619   PetscFunctionBegin;
2620   PetscCall(PetscObjectBaseTypeCompare((PetscObject)A, MATMPIAIJ, &isaij));
2621   PetscCheck(isaij, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Not for type %s", ((PetscObject)A)->type_name);
2622   *nz = aaij->i[A->rmap->n] + baij->i[A->rmap->n];
2623   PetscFunctionReturn(PETSC_SUCCESS);
2624 }
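
/*
   A minimal usage sketch for MatMPIAIJGetNumberNonzeros(); "A" is assumed to be an assembled
   MATMPIAIJ matrix created by the caller.

     PetscCount nz_local;
     PetscCall(MatMPIAIJGetNumberNonzeros(A, &nz_local));
     // nz_local now holds the nonzeros stored on this rank (diagonal plus off-diagonal block)
*/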
2625 
2626 /*@
2627   MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap
2628 
2629   Collective
2630 
2631   Input Parameters:
2632 + A  - the matrix
2633 - sc - `PETSC_TRUE` indicates the scalable algorithm should be used (the default is not to use it)
2634 
2635   Level: advanced
2636 
2637 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`
2638 @*/
2639 PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A, PetscBool sc)
2640 {
2641   PetscFunctionBegin;
2642   PetscTryMethod(A, "MatMPIAIJSetUseScalableIncreaseOverlap_C", (Mat, PetscBool), (A, sc));
2643   PetscFunctionReturn(PETSC_SUCCESS);
2644 }
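
/*
   A hedged sketch of enabling the scalable overlap algorithm before computing overlapping
   subdomains; "A" is assumed to be an assembled MATMPIAIJ matrix and "nis"/"is" are index sets
   owned by the caller. The same effect is available at runtime via -mat_increase_overlap_scalable.

     PetscCall(MatMPIAIJSetUseScalableIncreaseOverlap(A, PETSC_TRUE));
     PetscCall(MatIncreaseOverlap(A, nis, is, 1));   // grow each subdomain by one level of overlap
*/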
2645 
2646 PetscErrorCode MatSetFromOptions_MPIAIJ(Mat A, PetscOptionItems *PetscOptionsObject)
2647 {
2648   PetscBool sc = PETSC_FALSE, flg;
2649 
2650   PetscFunctionBegin;
2651   PetscOptionsHeadBegin(PetscOptionsObject, "MPIAIJ options");
2652   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2653   PetscCall(PetscOptionsBool("-mat_increase_overlap_scalable", "Use a scalable algorithm to compute the overlap", "MatIncreaseOverlap", sc, &sc, &flg));
2654   if (flg) PetscCall(MatMPIAIJSetUseScalableIncreaseOverlap(A, sc));
2655   PetscOptionsHeadEnd();
2656   PetscFunctionReturn(PETSC_SUCCESS);
2657 }
2658 
2659 static PetscErrorCode MatShift_MPIAIJ(Mat Y, PetscScalar a)
2660 {
2661   Mat_MPIAIJ *maij = (Mat_MPIAIJ *)Y->data;
2662   Mat_SeqAIJ *aij  = (Mat_SeqAIJ *)maij->A->data;
2663 
2664   PetscFunctionBegin;
2665   if (!Y->preallocated) {
2666     PetscCall(MatMPIAIJSetPreallocation(Y, 1, NULL, 0, NULL));
2667   } else if (!aij->nz) { /* It does not matter if the diagonal entries of Y only partially lie in maij->A; we just need an estimated preallocation. */
2668     PetscInt nonew = aij->nonew;
2669     PetscCall(MatSeqAIJSetPreallocation(maij->A, 1, NULL));
2670     aij->nonew = nonew;
2671   }
2672   PetscCall(MatShift_Basic(Y, a));
2673   PetscFunctionReturn(PETSC_SUCCESS);
2674 }
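
/*
   A minimal sketch of the public entry point dispatched above; "A" is assumed to be a preallocated
   (or assembled) MATMPIAIJ matrix.

     PetscCall(MatShift(A, (PetscScalar)2.0));   // A <- A + 2.0*I
*/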
2675 
2676 static PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A, PetscBool *missing, PetscInt *d)
2677 {
2678   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2679 
2680   PetscFunctionBegin;
2681   PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only works for square matrices");
2682   PetscCall(MatMissingDiagonal(a->A, missing, d));
2683   if (d) {
2684     PetscInt rstart;
2685     PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
2686     *d += rstart;
2687   }
2688   PetscFunctionReturn(PETSC_SUCCESS);
2689 }
2690 
2691 static PetscErrorCode MatInvertVariableBlockDiagonal_MPIAIJ(Mat A, PetscInt nblocks, const PetscInt *bsizes, PetscScalar *diag)
2692 {
2693   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2694 
2695   PetscFunctionBegin;
2696   PetscCall(MatInvertVariableBlockDiagonal(a->A, nblocks, bsizes, diag));
2697   PetscFunctionReturn(PETSC_SUCCESS);
2698 }
2699 
2700 static PetscErrorCode MatEliminateZeros_MPIAIJ(Mat A, PetscBool keep)
2701 {
2702   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
2703 
2704   PetscFunctionBegin;
2705   PetscCall(MatEliminateZeros_SeqAIJ(a->A, keep));        // possibly keep zero diagonal coefficients
2706   PetscCall(MatEliminateZeros_SeqAIJ(a->B, PETSC_FALSE)); // never keep zero diagonal coefficients
2707   PetscFunctionReturn(PETSC_SUCCESS);
2708 }
2709 
2710 static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2711                                        MatGetRow_MPIAIJ,
2712                                        MatRestoreRow_MPIAIJ,
2713                                        MatMult_MPIAIJ,
2714                                        /* 4*/ MatMultAdd_MPIAIJ,
2715                                        MatMultTranspose_MPIAIJ,
2716                                        MatMultTransposeAdd_MPIAIJ,
2717                                        NULL,
2718                                        NULL,
2719                                        NULL,
2720                                        /*10*/ NULL,
2721                                        NULL,
2722                                        NULL,
2723                                        MatSOR_MPIAIJ,
2724                                        MatTranspose_MPIAIJ,
2725                                        /*15*/ MatGetInfo_MPIAIJ,
2726                                        MatEqual_MPIAIJ,
2727                                        MatGetDiagonal_MPIAIJ,
2728                                        MatDiagonalScale_MPIAIJ,
2729                                        MatNorm_MPIAIJ,
2730                                        /*20*/ MatAssemblyBegin_MPIAIJ,
2731                                        MatAssemblyEnd_MPIAIJ,
2732                                        MatSetOption_MPIAIJ,
2733                                        MatZeroEntries_MPIAIJ,
2734                                        /*24*/ MatZeroRows_MPIAIJ,
2735                                        NULL,
2736                                        NULL,
2737                                        NULL,
2738                                        NULL,
2739                                        /*29*/ MatSetUp_MPI_Hash,
2740                                        NULL,
2741                                        NULL,
2742                                        MatGetDiagonalBlock_MPIAIJ,
2743                                        NULL,
2744                                        /*34*/ MatDuplicate_MPIAIJ,
2745                                        NULL,
2746                                        NULL,
2747                                        NULL,
2748                                        NULL,
2749                                        /*39*/ MatAXPY_MPIAIJ,
2750                                        MatCreateSubMatrices_MPIAIJ,
2751                                        MatIncreaseOverlap_MPIAIJ,
2752                                        MatGetValues_MPIAIJ,
2753                                        MatCopy_MPIAIJ,
2754                                        /*44*/ MatGetRowMax_MPIAIJ,
2755                                        MatScale_MPIAIJ,
2756                                        MatShift_MPIAIJ,
2757                                        MatDiagonalSet_MPIAIJ,
2758                                        MatZeroRowsColumns_MPIAIJ,
2759                                        /*49*/ MatSetRandom_MPIAIJ,
2760                                        MatGetRowIJ_MPIAIJ,
2761                                        MatRestoreRowIJ_MPIAIJ,
2762                                        NULL,
2763                                        NULL,
2764                                        /*54*/ MatFDColoringCreate_MPIXAIJ,
2765                                        NULL,
2766                                        MatSetUnfactored_MPIAIJ,
2767                                        MatPermute_MPIAIJ,
2768                                        NULL,
2769                                        /*59*/ MatCreateSubMatrix_MPIAIJ,
2770                                        MatDestroy_MPIAIJ,
2771                                        MatView_MPIAIJ,
2772                                        NULL,
2773                                        NULL,
2774                                        /*64*/ NULL,
2775                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2776                                        NULL,
2777                                        NULL,
2778                                        NULL,
2779                                        /*69*/ MatGetRowMaxAbs_MPIAIJ,
2780                                        MatGetRowMinAbs_MPIAIJ,
2781                                        NULL,
2782                                        NULL,
2783                                        NULL,
2784                                        NULL,
2785                                        /*75*/ MatFDColoringApply_AIJ,
2786                                        MatSetFromOptions_MPIAIJ,
2787                                        NULL,
2788                                        NULL,
2789                                        MatFindZeroDiagonals_MPIAIJ,
2790                                        /*80*/ NULL,
2791                                        NULL,
2792                                        NULL,
2793                                        /*83*/ MatLoad_MPIAIJ,
2794                                        MatIsSymmetric_MPIAIJ,
2795                                        NULL,
2796                                        NULL,
2797                                        NULL,
2798                                        NULL,
2799                                        /*89*/ NULL,
2800                                        NULL,
2801                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2802                                        NULL,
2803                                        NULL,
2804                                        /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2805                                        NULL,
2806                                        NULL,
2807                                        NULL,
2808                                        MatBindToCPU_MPIAIJ,
2809                                        /*99*/ MatProductSetFromOptions_MPIAIJ,
2810                                        NULL,
2811                                        NULL,
2812                                        MatConjugate_MPIAIJ,
2813                                        NULL,
2814                                        /*104*/ MatSetValuesRow_MPIAIJ,
2815                                        MatRealPart_MPIAIJ,
2816                                        MatImaginaryPart_MPIAIJ,
2817                                        NULL,
2818                                        NULL,
2819                                        /*109*/ NULL,
2820                                        NULL,
2821                                        MatGetRowMin_MPIAIJ,
2822                                        NULL,
2823                                        MatMissingDiagonal_MPIAIJ,
2824                                        /*114*/ MatGetSeqNonzeroStructure_MPIAIJ,
2825                                        NULL,
2826                                        MatGetGhosts_MPIAIJ,
2827                                        NULL,
2828                                        NULL,
2829                                        /*119*/ MatMultDiagonalBlock_MPIAIJ,
2830                                        NULL,
2831                                        NULL,
2832                                        NULL,
2833                                        MatGetMultiProcBlock_MPIAIJ,
2834                                        /*124*/ MatFindNonzeroRows_MPIAIJ,
2835                                        MatGetColumnReductions_MPIAIJ,
2836                                        MatInvertBlockDiagonal_MPIAIJ,
2837                                        MatInvertVariableBlockDiagonal_MPIAIJ,
2838                                        MatCreateSubMatricesMPI_MPIAIJ,
2839                                        /*129*/ NULL,
2840                                        NULL,
2841                                        NULL,
2842                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2843                                        NULL,
2844                                        /*134*/ NULL,
2845                                        NULL,
2846                                        NULL,
2847                                        NULL,
2848                                        NULL,
2849                                        /*139*/ MatSetBlockSizes_MPIAIJ,
2850                                        NULL,
2851                                        NULL,
2852                                        MatFDColoringSetUp_MPIXAIJ,
2853                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2854                                        MatCreateMPIMatConcatenateSeqMat_MPIAIJ,
2855                                        /*145*/ NULL,
2856                                        NULL,
2857                                        NULL,
2858                                        MatCreateGraph_Simple_AIJ,
2859                                        NULL,
2860                                        /*150*/ NULL,
2861                                        MatEliminateZeros_MPIAIJ};
2862 
2863 static PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
2864 {
2865   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2866 
2867   PetscFunctionBegin;
2868   PetscCall(MatStoreValues(aij->A));
2869   PetscCall(MatStoreValues(aij->B));
2870   PetscFunctionReturn(PETSC_SUCCESS);
2871 }
2872 
2873 static PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
2874 {
2875   Mat_MPIAIJ *aij = (Mat_MPIAIJ *)mat->data;
2876 
2877   PetscFunctionBegin;
2878   PetscCall(MatRetrieveValues(aij->A));
2879   PetscCall(MatRetrieveValues(aij->B));
2880   PetscFunctionReturn(PETSC_SUCCESS);
2881 }
2882 
2883 PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
2884 {
2885   Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2886   PetscMPIInt size;
2887 
2888   PetscFunctionBegin;
2889   if (B->hash_active) {
2890     B->ops[0]      = b->cops;
2891     B->hash_active = PETSC_FALSE;
2892   }
2893   PetscCall(PetscLayoutSetUp(B->rmap));
2894   PetscCall(PetscLayoutSetUp(B->cmap));
2895 
2896 #if defined(PETSC_USE_CTABLE)
2897   PetscCall(PetscHMapIDestroy(&b->colmap));
2898 #else
2899   PetscCall(PetscFree(b->colmap));
2900 #endif
2901   PetscCall(PetscFree(b->garray));
2902   PetscCall(VecDestroy(&b->lvec));
2903   PetscCall(VecScatterDestroy(&b->Mvctx));
2904 
2905   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
2906   PetscCall(MatDestroy(&b->B));
2907   PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
2908   PetscCall(MatSetSizes(b->B, B->rmap->n, size > 1 ? B->cmap->N : 0, B->rmap->n, size > 1 ? B->cmap->N : 0));
2909   PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
2910   PetscCall(MatSetType(b->B, MATSEQAIJ));
2911 
2912   PetscCall(MatDestroy(&b->A));
2913   PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
2914   PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
2915   PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
2916   PetscCall(MatSetType(b->A, MATSEQAIJ));
2917 
2918   PetscCall(MatSeqAIJSetPreallocation(b->A, d_nz, d_nnz));
2919   PetscCall(MatSeqAIJSetPreallocation(b->B, o_nz, o_nnz));
2920   B->preallocated  = PETSC_TRUE;
2921   B->was_assembled = PETSC_FALSE;
2922   B->assembled     = PETSC_FALSE;
2923   PetscFunctionReturn(PETSC_SUCCESS);
2924 }
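
/*
   A hedged sketch of the public MatMPIAIJSetPreallocation() interface that ends up in the routine
   above: each local row gets room for 5 nonzeros in the diagonal block and 2 in the off-diagonal
   block. The sizes (100x100, 5, 2) are illustrative assumptions only.

     Mat A;
     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100));
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL));
     // ... MatSetValues() / MatAssemblyBegin() / MatAssemblyEnd() ...
*/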
2925 
2926 static PetscErrorCode MatResetPreallocation_MPIAIJ(Mat B)
2927 {
2928   Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;
2929 
2930   PetscFunctionBegin;
2931   PetscValidHeaderSpecific(B, MAT_CLASSID, 1);
2932   PetscCall(PetscLayoutSetUp(B->rmap));
2933   PetscCall(PetscLayoutSetUp(B->cmap));
2934 
2935 #if defined(PETSC_USE_CTABLE)
2936   PetscCall(PetscHMapIDestroy(&b->colmap));
2937 #else
2938   PetscCall(PetscFree(b->colmap));
2939 #endif
2940   PetscCall(PetscFree(b->garray));
2941   PetscCall(VecDestroy(&b->lvec));
2942   PetscCall(VecScatterDestroy(&b->Mvctx));
2943 
2944   PetscCall(MatResetPreallocation(b->A));
2945   PetscCall(MatResetPreallocation(b->B));
2946   B->preallocated  = PETSC_TRUE;
2947   B->was_assembled = PETSC_FALSE;
2948   B->assembled     = PETSC_FALSE;
2949   PetscFunctionReturn(PETSC_SUCCESS);
2950 }
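
/*
   A minimal sketch of the intended use of MatResetPreallocation(): make the originally
   preallocated nonzero slots available again for a fresh round of insertions. "A", "row", "col"
   and "val" are assumed to be owned by the caller.

     PetscCall(MatResetPreallocation(A));
     PetscCall(MatSetValues(A, 1, &row, 1, &col, &val, INSERT_VALUES));
     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
*/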
2951 
2952 PetscErrorCode MatDuplicate_MPIAIJ(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
2953 {
2954   Mat         mat;
2955   Mat_MPIAIJ *a, *oldmat = (Mat_MPIAIJ *)matin->data;
2956 
2957   PetscFunctionBegin;
2958   *newmat = NULL;
2959   PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
2960   PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
2961   PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
2962   PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
2963   a = (Mat_MPIAIJ *)mat->data;
2964 
2965   mat->factortype = matin->factortype;
2966   mat->assembled  = matin->assembled;
2967   mat->insertmode = NOT_SET_VALUES;
2968 
2969   a->size         = oldmat->size;
2970   a->rank         = oldmat->rank;
2971   a->donotstash   = oldmat->donotstash;
2972   a->roworiented  = oldmat->roworiented;
2973   a->rowindices   = NULL;
2974   a->rowvalues    = NULL;
2975   a->getrowactive = PETSC_FALSE;
2976 
2977   PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
2978   PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));
2979   if (matin->hash_active) {
2980     PetscCall(MatSetUp(mat));
2981   } else {
2982     mat->preallocated = matin->preallocated;
2983     if (oldmat->colmap) {
2984 #if defined(PETSC_USE_CTABLE)
2985       PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
2986 #else
2987       PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
2988       PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
2989 #endif
2990     } else a->colmap = NULL;
2991     if (oldmat->garray) {
2992       PetscInt len;
2993       len = oldmat->B->cmap->n;
2994       PetscCall(PetscMalloc1(len + 1, &a->garray));
2995       if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
2996     } else a->garray = NULL;
2997 
2998     /* MatDuplicate() may be called on a non-assembled matrix;
2999       in fact, MatDuplicate() only requires the matrix to be preallocated.
3000       This can happen, for example, inside DMCreateMatrix_Shell */
3001     if (oldmat->lvec) PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
3002     if (oldmat->Mvctx) PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));
3003     PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
3004     PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
3005   }
3006   PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
3007   *newmat = mat;
3008   PetscFunctionReturn(PETSC_SUCCESS);
3009 }
3010 
3011 PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
3012 {
3013   PetscBool isbinary, ishdf5;
3014 
3015   PetscFunctionBegin;
3016   PetscValidHeaderSpecific(newMat, MAT_CLASSID, 1);
3017   PetscValidHeaderSpecific(viewer, PETSC_VIEWER_CLASSID, 2);
3018   /* force binary viewer to load .info file if it has not yet done so */
3019   PetscCall(PetscViewerSetUp(viewer));
3020   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
3021   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERHDF5, &ishdf5));
3022   if (isbinary) {
3023     PetscCall(MatLoad_MPIAIJ_Binary(newMat, viewer));
3024   } else if (ishdf5) {
3025 #if defined(PETSC_HAVE_HDF5)
3026     PetscCall(MatLoad_AIJ_HDF5(newMat, viewer));
3027 #else
3028     SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "HDF5 not supported in this build.\nPlease reconfigure using --download-hdf5");
3029 #endif
3030   } else {
3031     SETERRQ(PetscObjectComm((PetscObject)newMat), PETSC_ERR_SUP, "Viewer type %s not yet supported for reading %s matrices", ((PetscObject)viewer)->type_name, ((PetscObject)newMat)->type_name);
3032   }
3033   PetscFunctionReturn(PETSC_SUCCESS);
3034 }
3035 
3036 PetscErrorCode MatLoad_MPIAIJ_Binary(Mat mat, PetscViewer viewer)
3037 {
3038   PetscInt     header[4], M, N, m, nz, rows, cols, sum, i;
3039   PetscInt    *rowidxs, *colidxs;
3040   PetscScalar *matvals;
3041 
3042   PetscFunctionBegin;
3043   PetscCall(PetscViewerSetUp(viewer));
3044 
3045   /* read in matrix header */
3046   PetscCall(PetscViewerBinaryRead(viewer, header, 4, NULL, PETSC_INT));
3047   PetscCheck(header[0] == MAT_FILE_CLASSID, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Not a matrix object in file");
3048   M  = header[1];
3049   N  = header[2];
3050   nz = header[3];
3051   PetscCheck(M >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix row size (%" PetscInt_FMT ") in file is negative", M);
3052   PetscCheck(N >= 0, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Matrix column size (%" PetscInt_FMT ") in file is negative", N);
3053   PetscCheck(nz >= 0, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix stored in special format on disk, cannot load as MPIAIJ");
3054 
3055   /* set block sizes from the viewer's .info file */
3056   PetscCall(MatLoad_Binary_BlockSizes(mat, viewer));
3057   /* set global sizes if not set already */
3058   if (mat->rmap->N < 0) mat->rmap->N = M;
3059   if (mat->cmap->N < 0) mat->cmap->N = N;
3060   PetscCall(PetscLayoutSetUp(mat->rmap));
3061   PetscCall(PetscLayoutSetUp(mat->cmap));
3062 
3063   /* check if the matrix sizes are correct */
3064   PetscCall(MatGetSize(mat, &rows, &cols));
3065   PetscCheck(M == rows && N == cols, PETSC_COMM_SELF, PETSC_ERR_FILE_UNEXPECTED, "Matrix in file of different sizes (%" PetscInt_FMT ", %" PetscInt_FMT ") than the input matrix (%" PetscInt_FMT ", %" PetscInt_FMT ")", M, N, rows, cols);
3066 
3067   /* read in row lengths and build row indices */
3068   PetscCall(MatGetLocalSize(mat, &m, NULL));
3069   PetscCall(PetscMalloc1(m + 1, &rowidxs));
3070   PetscCall(PetscViewerBinaryReadAll(viewer, rowidxs + 1, m, PETSC_DECIDE, M, PETSC_INT));
3071   rowidxs[0] = 0;
3072   for (i = 0; i < m; i++) rowidxs[i + 1] += rowidxs[i];
3073   if (nz != PETSC_MAX_INT) {
3074     PetscCall(MPIU_Allreduce(&rowidxs[m], &sum, 1, MPIU_INT, MPI_SUM, PetscObjectComm((PetscObject)viewer)));
3075     PetscCheck(sum == nz, PetscObjectComm((PetscObject)viewer), PETSC_ERR_FILE_UNEXPECTED, "Inconsistent matrix data in file: nonzeros = %" PetscInt_FMT ", sum-row-lengths = %" PetscInt_FMT, nz, sum);
3076   }
3077 
3078   /* read in column indices and matrix values */
3079   PetscCall(PetscMalloc2(rowidxs[m], &colidxs, rowidxs[m], &matvals));
3080   PetscCall(PetscViewerBinaryReadAll(viewer, colidxs, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_INT));
3081   PetscCall(PetscViewerBinaryReadAll(viewer, matvals, rowidxs[m], PETSC_DETERMINE, PETSC_DETERMINE, PETSC_SCALAR));
3082   /* store matrix indices and values */
3083   PetscCall(MatMPIAIJSetPreallocationCSR(mat, rowidxs, colidxs, matvals));
3084   PetscCall(PetscFree(rowidxs));
3085   PetscCall(PetscFree2(colidxs, matvals));
3086   PetscFunctionReturn(PETSC_SUCCESS);
3087 }
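
/*
   A hedged sketch of the typical calling sequence that reaches MatLoad_MPIAIJ_Binary(); the file
   name "matrix.dat" is an assumption and must have been written earlier with MatView() on a
   binary viewer.

     Mat         A;
     PetscViewer viewer;
     PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, "matrix.dat", FILE_MODE_READ, &viewer));
     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetType(A, MATMPIAIJ));
     PetscCall(MatLoad(A, viewer));
     PetscCall(PetscViewerDestroy(&viewer));
*/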
3088 
3089 /* Not scalable because of ISAllGather() unless getting all columns. */
3090 static PetscErrorCode ISGetSeqIS_Private(Mat mat, IS iscol, IS *isseq)
3091 {
3092   IS          iscol_local;
3093   PetscBool   isstride;
3094   PetscMPIInt lisstride = 0, gisstride;
3095 
3096   PetscFunctionBegin;
3097   /* check if we are grabbing all columns */
3098   PetscCall(PetscObjectTypeCompare((PetscObject)iscol, ISSTRIDE, &isstride));
3099 
3100   if (isstride) {
3101     PetscInt start, len, mstart, mlen;
3102     PetscCall(ISStrideGetInfo(iscol, &start, NULL));
3103     PetscCall(ISGetLocalSize(iscol, &len));
3104     PetscCall(MatGetOwnershipRangeColumn(mat, &mstart, &mlen));
3105     if (mstart == start && mlen - mstart == len) lisstride = 1;
3106   }
3107 
3108   PetscCall(MPIU_Allreduce(&lisstride, &gisstride, 1, MPI_INT, MPI_MIN, PetscObjectComm((PetscObject)mat)));
3109   if (gisstride) {
3110     PetscInt N;
3111     PetscCall(MatGetSize(mat, NULL, &N));
3112     PetscCall(ISCreateStride(PETSC_COMM_SELF, N, 0, 1, &iscol_local));
3113     PetscCall(ISSetIdentity(iscol_local));
3114     PetscCall(PetscInfo(mat, "Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n"));
3115   } else {
3116     PetscInt cbs;
3117     PetscCall(ISGetBlockSize(iscol, &cbs));
3118     PetscCall(ISAllGather(iscol, &iscol_local));
3119     PetscCall(ISSetBlockSize(iscol_local, cbs));
3120   }
3121 
3122   *isseq = iscol_local;
3123   PetscFunctionReturn(PETSC_SUCCESS);
3124 }
3125 
3126 /*
3127  Used by MatCreateSubMatrix_MPIAIJ_SameRowColDist() to avoid ISAllGather() and global size of iscol_local
3128  (see MatCreateSubMatrix_MPIAIJ_nonscalable)
3129 
3130  Input Parameters:
3131 +   mat - matrix
3132 .   isrow - parallel row index set; its local indices are a subset of the local rows of `mat`,
3133            i.e., mat->rstart <= isrow[i] < mat->rend
3134 -   iscol - parallel column index set; its local indices are a subset of local columns of `mat`,
3135            i.e., mat->cstart <= iscol[i] < mat->cend
3136 
3137  Output Parameters:
3138 +   isrow_d - sequential row index set for retrieving mat->A
3139 .   iscol_d - sequential column index set for retrieving mat->A
3140 .   iscol_o - sequential column index set for retrieving mat->B
3141 -   garray - column map; garray[i] indicates global location of iscol_o[i] in `iscol`
3142  */
3143 static PetscErrorCode ISGetSeqIS_SameColDist_Private(Mat mat, IS isrow, IS iscol, IS *isrow_d, IS *iscol_d, IS *iscol_o, const PetscInt *garray[])
3144 {
3145   Vec             x, cmap;
3146   const PetscInt *is_idx;
3147   PetscScalar    *xarray, *cmaparray;
3148   PetscInt        ncols, isstart, *idx, m, rstart, *cmap1, count;
3149   Mat_MPIAIJ     *a    = (Mat_MPIAIJ *)mat->data;
3150   Mat             B    = a->B;
3151   Vec             lvec = a->lvec, lcmap;
3152   PetscInt        i, cstart, cend, Bn = B->cmap->N;
3153   MPI_Comm        comm;
3154   VecScatter      Mvctx = a->Mvctx;
3155 
3156   PetscFunctionBegin;
3157   PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3158   PetscCall(ISGetLocalSize(iscol, &ncols));
3159 
3160   /* (1) iscol is a sub-column vector of mat, pad it with '-1.' to form a full vector x */
3161   PetscCall(MatCreateVecs(mat, &x, NULL));
3162   PetscCall(VecSet(x, -1.0));
3163   PetscCall(VecDuplicate(x, &cmap));
3164   PetscCall(VecSet(cmap, -1.0));
3165 
3166   /* Get start indices */
3167   PetscCallMPI(MPI_Scan(&ncols, &isstart, 1, MPIU_INT, MPI_SUM, comm));
3168   isstart -= ncols;
3169   PetscCall(MatGetOwnershipRangeColumn(mat, &cstart, &cend));
3170 
3171   PetscCall(ISGetIndices(iscol, &is_idx));
3172   PetscCall(VecGetArray(x, &xarray));
3173   PetscCall(VecGetArray(cmap, &cmaparray));
3174   PetscCall(PetscMalloc1(ncols, &idx));
3175   for (i = 0; i < ncols; i++) {
3176     xarray[is_idx[i] - cstart]    = (PetscScalar)is_idx[i];
3177     cmaparray[is_idx[i] - cstart] = i + isstart;        /* global index of iscol[i] */
3178     idx[i]                        = is_idx[i] - cstart; /* local index of iscol[i]  */
3179   }
3180   PetscCall(VecRestoreArray(x, &xarray));
3181   PetscCall(VecRestoreArray(cmap, &cmaparray));
3182   PetscCall(ISRestoreIndices(iscol, &is_idx));
3183 
3184   /* Get iscol_d */
3185   PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, iscol_d));
3186   PetscCall(ISGetBlockSize(iscol, &i));
3187   PetscCall(ISSetBlockSize(*iscol_d, i));
3188 
3189   /* Get isrow_d */
3190   PetscCall(ISGetLocalSize(isrow, &m));
3191   rstart = mat->rmap->rstart;
3192   PetscCall(PetscMalloc1(m, &idx));
3193   PetscCall(ISGetIndices(isrow, &is_idx));
3194   for (i = 0; i < m; i++) idx[i] = is_idx[i] - rstart;
3195   PetscCall(ISRestoreIndices(isrow, &is_idx));
3196 
3197   PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_OWN_POINTER, isrow_d));
3198   PetscCall(ISGetBlockSize(isrow, &i));
3199   PetscCall(ISSetBlockSize(*isrow_d, i));
3200 
3201   /* (2) Scatter x and cmap using aij->Mvctx to get their off-process portions (see MatMult_MPIAIJ) */
3202   PetscCall(VecScatterBegin(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));
3203   PetscCall(VecScatterEnd(Mvctx, x, lvec, INSERT_VALUES, SCATTER_FORWARD));
3204 
3205   PetscCall(VecDuplicate(lvec, &lcmap));
3206 
3207   PetscCall(VecScatterBegin(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));
3208   PetscCall(VecScatterEnd(Mvctx, cmap, lcmap, INSERT_VALUES, SCATTER_FORWARD));
3209 
3210   /* (3) create sequential iscol_o (a subset of iscol) and isgarray */
3211   /* off-process column indices */
3212   count = 0;
3213   PetscCall(PetscMalloc1(Bn, &idx));
3214   PetscCall(PetscMalloc1(Bn, &cmap1));
3215 
3216   PetscCall(VecGetArray(lvec, &xarray));
3217   PetscCall(VecGetArray(lcmap, &cmaparray));
3218   for (i = 0; i < Bn; i++) {
3219     if (PetscRealPart(xarray[i]) > -1.0) {
3220       idx[count]   = i;                                     /* local column index in off-diagonal part B */
3221       cmap1[count] = (PetscInt)PetscRealPart(cmaparray[i]); /* column index in submat */
3222       count++;
3223     }
3224   }
3225   PetscCall(VecRestoreArray(lvec, &xarray));
3226   PetscCall(VecRestoreArray(lcmap, &cmaparray));
3227 
3228   PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_COPY_VALUES, iscol_o));
3229   /* cannot ensure iscol_o has same blocksize as iscol! */
3230 
3231   PetscCall(PetscFree(idx));
3232   *garray = cmap1;
3233 
3234   PetscCall(VecDestroy(&x));
3235   PetscCall(VecDestroy(&cmap));
3236   PetscCall(VecDestroy(&lcmap));
3237   PetscFunctionReturn(PETSC_SUCCESS);
3238 }
3239 
3240 /* isrow and iscol have the same processor distribution as mat; each process's part of the output *submat is a submatrix of its local part of mat */
3241 PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowColDist(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *submat)
3242 {
3243   Mat_MPIAIJ *a = (Mat_MPIAIJ *)mat->data, *asub;
3244   Mat         M = NULL;
3245   MPI_Comm    comm;
3246   IS          iscol_d, isrow_d, iscol_o;
3247   Mat         Asub = NULL, Bsub = NULL;
3248   PetscInt    n;
3249 
3250   PetscFunctionBegin;
3251   PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3252 
3253   if (call == MAT_REUSE_MATRIX) {
3254     /* Retrieve isrow_d, iscol_d and iscol_o from submat */
3255     PetscCall(PetscObjectQuery((PetscObject)*submat, "isrow_d", (PetscObject *)&isrow_d));
3256     PetscCheck(isrow_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "isrow_d passed in was not used before, cannot reuse");
3257 
3258     PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_d", (PetscObject *)&iscol_d));
3259     PetscCheck(iscol_d, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_d passed in was not used before, cannot reuse");
3260 
3261     PetscCall(PetscObjectQuery((PetscObject)*submat, "iscol_o", (PetscObject *)&iscol_o));
3262     PetscCheck(iscol_o, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "iscol_o passed in was not used before, cannot reuse");
3263 
3264     /* Update diagonal and off-diagonal portions of submat */
3265     asub = (Mat_MPIAIJ *)(*submat)->data;
3266     PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->A));
3267     PetscCall(ISGetLocalSize(iscol_o, &n));
3268     if (n) PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_REUSE_MATRIX, &asub->B));
3269     PetscCall(MatAssemblyBegin(*submat, MAT_FINAL_ASSEMBLY));
3270     PetscCall(MatAssemblyEnd(*submat, MAT_FINAL_ASSEMBLY));
3271 
3272   } else { /* call == MAT_INITIAL_MATRIX) */
3273     const PetscInt *garray;
3274     PetscInt        BsubN;
3275 
3276     /* Create isrow_d, iscol_d, iscol_o and isgarray (replace isgarray with array?) */
3277     PetscCall(ISGetSeqIS_SameColDist_Private(mat, isrow, iscol, &isrow_d, &iscol_d, &iscol_o, &garray));
3278 
3279     /* Create local submatrices Asub and Bsub */
3280     PetscCall(MatCreateSubMatrix_SeqAIJ(a->A, isrow_d, iscol_d, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Asub));
3281     PetscCall(MatCreateSubMatrix_SeqAIJ(a->B, isrow_d, iscol_o, PETSC_DECIDE, MAT_INITIAL_MATRIX, &Bsub));
3282 
3283     /* Create submatrix M */
3284     PetscCall(MatCreateMPIAIJWithSeqAIJ(comm, Asub, Bsub, garray, &M));
3285 
3286     /* If Bsub has empty columns, compress iscol_o such that it will retrieve condensed Bsub from a->B during reuse */
3287     asub = (Mat_MPIAIJ *)M->data;
3288 
3289     PetscCall(ISGetLocalSize(iscol_o, &BsubN));
3290     n = asub->B->cmap->N;
3291     if (BsubN > n) {
3292       /* This case can be tested using ~petsc/src/tao/bound/tutorials/runplate2_3 */
3293       const PetscInt *idx;
3294       PetscInt        i, j, *idx_new, *subgarray = asub->garray;
3295       PetscCall(PetscInfo(M, "submatrix Bn %" PetscInt_FMT " != BsubN %" PetscInt_FMT ", update iscol_o\n", n, BsubN));
3296 
3297       PetscCall(PetscMalloc1(n, &idx_new));
3298       j = 0;
3299       PetscCall(ISGetIndices(iscol_o, &idx));
3300       for (i = 0; i < n; i++) {
3301         if (j >= BsubN) break;
3302         while (subgarray[i] > garray[j]) j++;
3303 
3304         if (subgarray[i] == garray[j]) {
3305           idx_new[i] = idx[j++];
3306           } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "subgarray[%" PetscInt_FMT "]=%" PetscInt_FMT " cannot be smaller than garray[%" PetscInt_FMT "]=%" PetscInt_FMT, i, subgarray[i], j, garray[j]);
3307       }
3308       PetscCall(ISRestoreIndices(iscol_o, &idx));
3309 
3310       PetscCall(ISDestroy(&iscol_o));
3311       PetscCall(ISCreateGeneral(PETSC_COMM_SELF, n, idx_new, PETSC_OWN_POINTER, &iscol_o));
3312 
3313     } else if (BsubN < n) {
3314       SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Columns of Bsub (%" PetscInt_FMT ") cannot be smaller than B's (%" PetscInt_FMT ")", BsubN, asub->B->cmap->N);
3315     }
3316 
3317     PetscCall(PetscFree(garray));
3318     *submat = M;
3319 
3320     /* Save isrow_d, iscol_d and iscol_o used in processor for next request */
3321     PetscCall(PetscObjectCompose((PetscObject)M, "isrow_d", (PetscObject)isrow_d));
3322     PetscCall(ISDestroy(&isrow_d));
3323 
3324     PetscCall(PetscObjectCompose((PetscObject)M, "iscol_d", (PetscObject)iscol_d));
3325     PetscCall(ISDestroy(&iscol_d));
3326 
3327     PetscCall(PetscObjectCompose((PetscObject)M, "iscol_o", (PetscObject)iscol_o));
3328     PetscCall(ISDestroy(&iscol_o));
3329   }
3330   PetscFunctionReturn(PETSC_SUCCESS);
3331 }
3332 
3333 PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat, IS isrow, IS iscol, MatReuse call, Mat *newmat)
3334 {
3335   IS        iscol_local = NULL, isrow_d;
3336   PetscInt  csize;
3337   PetscInt  n, i, j, start, end;
3338   PetscBool sameRowDist = PETSC_FALSE, sameDist[2], tsameDist[2];
3339   MPI_Comm  comm;
3340 
3341   PetscFunctionBegin;
3342   /* If isrow has same processor distribution as mat,
3343      call MatCreateSubMatrix_MPIAIJ_SameRowDist() to avoid using a hash table with global size of iscol */
3344   if (call == MAT_REUSE_MATRIX) {
3345     PetscCall(PetscObjectQuery((PetscObject)*newmat, "isrow_d", (PetscObject *)&isrow_d));
3346     if (isrow_d) {
3347       sameRowDist  = PETSC_TRUE;
3348       tsameDist[1] = PETSC_TRUE; /* sameColDist */
3349     } else {
3350       PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_local));
3351       if (iscol_local) {
3352         sameRowDist  = PETSC_TRUE;
3353         tsameDist[1] = PETSC_FALSE; /* !sameColDist */
3354       }
3355     }
3356   } else {
3357     /* Check if isrow has same processor distribution as mat */
3358     sameDist[0] = PETSC_FALSE;
3359     PetscCall(ISGetLocalSize(isrow, &n));
3360     if (!n) {
3361       sameDist[0] = PETSC_TRUE;
3362     } else {
3363       PetscCall(ISGetMinMax(isrow, &i, &j));
3364       PetscCall(MatGetOwnershipRange(mat, &start, &end));
3365       if (i >= start && j < end) sameDist[0] = PETSC_TRUE;
3366     }
3367 
3368     /* Check if iscol has same processor distribution as mat */
3369     sameDist[1] = PETSC_FALSE;
3370     PetscCall(ISGetLocalSize(iscol, &n));
3371     if (!n) {
3372       sameDist[1] = PETSC_TRUE;
3373     } else {
3374       PetscCall(ISGetMinMax(iscol, &i, &j));
3375       PetscCall(MatGetOwnershipRangeColumn(mat, &start, &end));
3376       if (i >= start && j < end) sameDist[1] = PETSC_TRUE;
3377     }
3378 
3379     PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3380     PetscCall(MPIU_Allreduce(&sameDist, &tsameDist, 2, MPIU_BOOL, MPI_LAND, comm));
3381     sameRowDist = tsameDist[0];
3382   }
3383 
3384   if (sameRowDist) {
3385     if (tsameDist[1]) { /* sameRowDist & sameColDist */
3386       /* isrow and iscol have same processor distribution as mat */
3387       PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowColDist(mat, isrow, iscol, call, newmat));
3388       PetscFunctionReturn(PETSC_SUCCESS);
3389     } else { /* sameRowDist */
3390       /* isrow has same processor distribution as mat */
3391       if (call == MAT_INITIAL_MATRIX) {
3392         PetscBool sorted;
3393         PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
3394         PetscCall(ISGetLocalSize(iscol_local, &n)); /* local size of iscol_local = global columns of newmat */
3395         PetscCall(ISGetSize(iscol, &i));
3396         PetscCheck(n == i, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "n %" PetscInt_FMT " != size of iscol %" PetscInt_FMT, n, i);
3397 
3398         PetscCall(ISSorted(iscol_local, &sorted));
3399         if (sorted) {
3400           /* MatCreateSubMatrix_MPIAIJ_SameRowDist() requires iscol_local be sorted; it can have duplicate indices */
3401           PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, iscol_local, MAT_INITIAL_MATRIX, newmat));
3402           PetscFunctionReturn(PETSC_SUCCESS);
3403         }
3404       } else { /* call == MAT_REUSE_MATRIX */
3405         IS iscol_sub;
3406         PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
3407         if (iscol_sub) {
3408           PetscCall(MatCreateSubMatrix_MPIAIJ_SameRowDist(mat, isrow, iscol, NULL, call, newmat));
3409           PetscFunctionReturn(PETSC_SUCCESS);
3410         }
3411       }
3412     }
3413   }
3414 
3415   /* General case: iscol -> iscol_local which has global size of iscol */
3416   if (call == MAT_REUSE_MATRIX) {
3417     PetscCall(PetscObjectQuery((PetscObject)*newmat, "ISAllGather", (PetscObject *)&iscol_local));
3418     PetscCheck(iscol_local, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3419   } else {
3420     if (!iscol_local) PetscCall(ISGetSeqIS_Private(mat, iscol, &iscol_local));
3421   }
3422 
3423   PetscCall(ISGetLocalSize(iscol, &csize));
3424   PetscCall(MatCreateSubMatrix_MPIAIJ_nonscalable(mat, isrow, iscol_local, csize, call, newmat));
3425 
3426   if (call == MAT_INITIAL_MATRIX) {
3427     PetscCall(PetscObjectCompose((PetscObject)*newmat, "ISAllGather", (PetscObject)iscol_local));
3428     PetscCall(ISDestroy(&iscol_local));
3429   }
3430   PetscFunctionReturn(PETSC_SUCCESS);
3431 }
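
/*
   A minimal caller-side sketch of the dispatch above: both index sets match the matrix's own
   row/column distribution, so the SameRowColDist path is taken. "mat" is assumed to be an
   assembled MATMPIAIJ matrix; cleanup of "sub" is omitted.

     PetscInt rstart, rend, cstart, cend;
     IS       isrow, iscol;
     Mat      sub;
     PetscCall(MatGetOwnershipRange(mat, &rstart, &rend));
     PetscCall(MatGetOwnershipRangeColumn(mat, &cstart, &cend));
     PetscCall(ISCreateStride(PETSC_COMM_WORLD, rend - rstart, rstart, 1, &isrow));
     PetscCall(ISCreateStride(PETSC_COMM_WORLD, cend - cstart, cstart, 1, &iscol));
     PetscCall(MatCreateSubMatrix(mat, isrow, iscol, MAT_INITIAL_MATRIX, &sub));
     // a later call with MAT_REUSE_MATRIX and the same index sets updates "sub" in place
     PetscCall(ISDestroy(&isrow));
     PetscCall(ISDestroy(&iscol));
*/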
3432 
3433 /*@C
3434   MatCreateMPIAIJWithSeqAIJ - creates a `MATMPIAIJ` matrix using `MATSEQAIJ` matrices that contain the "diagonal"
3435   and "off-diagonal" part of the matrix in CSR format.
3436 
3437   Collective
3438 
3439   Input Parameters:
3440 + comm   - MPI communicator
3441 . A      - "diagonal" portion of matrix
3442 . B      - "off-diagonal" portion of matrix, may have empty columns, will be destroyed by this routine
3443 - garray - global index of `B` columns
3444 
3445   Output Parameter:
3446 . mat - the matrix, with input `A` as its local diagonal matrix
3447 
3448   Level: advanced
3449 
3450   Notes:
3451   See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix.
3452 
3453   `A` becomes part of the output `mat`, while `B` is destroyed by this routine; the user may not use `A` or `B` afterwards.
3454 
3455 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MATSEQAIJ`, `MatCreateMPIAIJWithSplitArrays()`
3456 @*/
3457 PetscErrorCode MatCreateMPIAIJWithSeqAIJ(MPI_Comm comm, Mat A, Mat B, const PetscInt garray[], Mat *mat)
3458 {
3459   Mat_MPIAIJ        *maij;
3460   Mat_SeqAIJ        *b  = (Mat_SeqAIJ *)B->data, *bnew;
3461   PetscInt          *oi = b->i, *oj = b->j, i, nz, col;
3462   const PetscScalar *oa;
3463   Mat                Bnew;
3464   PetscInt           m, n, N;
3465   MatType            mpi_mat_type;
3466 
3467   PetscFunctionBegin;
3468   PetscCall(MatCreate(comm, mat));
3469   PetscCall(MatGetSize(A, &m, &n));
3470   PetscCheck(m == B->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Am %" PetscInt_FMT " != Bm %" PetscInt_FMT, m, B->rmap->N);
3471   PetscCheck(PetscAbs(A->rmap->bs) == PetscAbs(B->rmap->bs), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "A row bs %" PetscInt_FMT " != B row bs %" PetscInt_FMT, A->rmap->bs, B->rmap->bs);
3472   /* the check below is disabled: when B is created using iscol_o from ISGetSeqIS_SameColDist_Private(), its bs may not be the same as A's */
3473   /* PetscCheck(A->cmap->bs == B->cmap->bs,PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"A column bs %" PetscInt_FMT " != B column bs %" PetscInt_FMT,A->cmap->bs,B->cmap->bs); */
3474 
3475   /* Get global columns of mat */
3476   PetscCall(MPIU_Allreduce(&n, &N, 1, MPIU_INT, MPI_SUM, comm));
3477 
3478   PetscCall(MatSetSizes(*mat, m, n, PETSC_DECIDE, N));
3479   /* Determine the type of MPI matrix that should be created from the type of matrix A, which holds the "diagonal" portion. */
3480   PetscCall(MatGetMPIMatType_Private(A, &mpi_mat_type));
3481   PetscCall(MatSetType(*mat, mpi_mat_type));
3482 
3483   if (A->rmap->bs > 1 || A->cmap->bs > 1) PetscCall(MatSetBlockSizes(*mat, A->rmap->bs, A->cmap->bs));
3484   maij = (Mat_MPIAIJ *)(*mat)->data;
3485 
3486   (*mat)->preallocated = PETSC_TRUE;
3487 
3488   PetscCall(PetscLayoutSetUp((*mat)->rmap));
3489   PetscCall(PetscLayoutSetUp((*mat)->cmap));
3490 
3491   /* Set A as diagonal portion of *mat */
3492   maij->A = A;
3493 
3494   nz = oi[m];
3495   for (i = 0; i < nz; i++) {
3496     col   = oj[i];
3497     oj[i] = garray[col];
3498   }
3499 
3500   /* Set Bnew as off-diagonal portion of *mat */
3501   PetscCall(MatSeqAIJGetArrayRead(B, &oa));
3502   PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, N, oi, oj, (PetscScalar *)oa, &Bnew));
3503   PetscCall(MatSeqAIJRestoreArrayRead(B, &oa));
3504   bnew        = (Mat_SeqAIJ *)Bnew->data;
3505   bnew->maxnz = b->maxnz; /* allocated nonzeros of B */
3506   maij->B     = Bnew;
3507 
3508   PetscCheck(B->rmap->N == Bnew->rmap->N, PETSC_COMM_SELF, PETSC_ERR_PLIB, "BN %" PetscInt_FMT " != BnewN %" PetscInt_FMT, B->rmap->N, Bnew->rmap->N);
3509 
3510   b->singlemalloc = PETSC_FALSE; /* B arrays are shared by Bnew */
3511   b->free_a       = PETSC_FALSE;
3512   b->free_ij      = PETSC_FALSE;
3513   PetscCall(MatDestroy(&B));
3514 
3515   bnew->singlemalloc = PETSC_TRUE; /* arrays will be freed by MatDestroy(&Bnew) */
3516   bnew->free_a       = PETSC_TRUE;
3517   bnew->free_ij      = PETSC_TRUE;
3518 
3519   /* condense columns of maij->B */
3520   PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
3521   PetscCall(MatAssemblyBegin(*mat, MAT_FINAL_ASSEMBLY));
3522   PetscCall(MatAssemblyEnd(*mat, MAT_FINAL_ASSEMBLY));
3523   PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE));
3524   PetscCall(MatSetOption(*mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
3525   PetscFunctionReturn(PETSC_SUCCESS);
3526 }
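
/*
   A hedged sketch of how MatCreateMPIAIJWithSeqAIJ() is meant to be driven: "Ad" and "Ao" are
   sequential AIJ matrices built by the caller holding this rank's diagonal block and
   (column-compressed) off-diagonal block, and "garray" maps each column of Ao to its global
   column index. Both Ad and Ao are consumed by the call and must not be used afterwards.

     Mat C;
     PetscCall(MatCreateMPIAIJWithSeqAIJ(PETSC_COMM_WORLD, Ad, Ao, garray, &C));
     // ... use C; MatDestroy(&C) also releases the parts it took ownership of ...
*/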
3527 
3528 extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, PetscBool, Mat *);
3529 
3530 PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameRowDist(Mat mat, IS isrow, IS iscol, IS iscol_local, MatReuse call, Mat *newmat)
3531 {
3532   PetscInt        i, m, n, rstart, row, rend, nz, j, bs, cbs;
3533   PetscInt       *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
3534   Mat_MPIAIJ     *a = (Mat_MPIAIJ *)mat->data;
3535   Mat             M, Msub, B = a->B;
3536   MatScalar      *aa;
3537   Mat_SeqAIJ     *aij;
3538   PetscInt       *garray = a->garray, *colsub, Ncols;
3539   PetscInt        count, Bn = B->cmap->N, cstart = mat->cmap->rstart, cend = mat->cmap->rend;
3540   IS              iscol_sub, iscmap;
3541   const PetscInt *is_idx, *cmap;
3542   PetscBool       allcolumns = PETSC_FALSE;
3543   MPI_Comm        comm;
3544 
3545   PetscFunctionBegin;
3546   PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3547   if (call == MAT_REUSE_MATRIX) {
3548     PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubIScol", (PetscObject *)&iscol_sub));
3549     PetscCheck(iscol_sub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "SubIScol passed in was not used before, cannot reuse");
3550     PetscCall(ISGetLocalSize(iscol_sub, &count));
3551 
3552     PetscCall(PetscObjectQuery((PetscObject)*newmat, "Subcmap", (PetscObject *)&iscmap));
3553     PetscCheck(iscmap, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Subcmap passed in was not used before, cannot reuse");
3554 
3555     PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Msub));
3556     PetscCheck(Msub, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3557 
3558     PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_REUSE_MATRIX, PETSC_FALSE, &Msub));
3559 
3560   } else { /* call == MAT_INITIAL_MATRIX) */
3561     PetscBool flg;
3562 
3563     PetscCall(ISGetLocalSize(iscol, &n));
3564     PetscCall(ISGetSize(iscol, &Ncols));
3565 
3566     /* (1) iscol -> nonscalable iscol_local */
3567     /* Check for special case: each processor gets entire matrix columns */
3568     PetscCall(ISIdentity(iscol_local, &flg));
3569     if (flg && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3570     PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
3571     if (allcolumns) {
3572       iscol_sub = iscol_local;
3573       PetscCall(PetscObjectReference((PetscObject)iscol_local));
3574       PetscCall(ISCreateStride(PETSC_COMM_SELF, n, 0, 1, &iscmap));
3575 
3576     } else {
3577       /* (2) iscol_local -> iscol_sub and iscmap. The implementation below requires iscol_local to be sorted; it can have duplicate indices */
3578       PetscInt *idx, *cmap1, k;
3579       PetscCall(PetscMalloc1(Ncols, &idx));
3580       PetscCall(PetscMalloc1(Ncols, &cmap1));
3581       PetscCall(ISGetIndices(iscol_local, &is_idx));
3582       count = 0;
3583       k     = 0;
3584       for (i = 0; i < Ncols; i++) {
3585         j = is_idx[i];
3586         if (j >= cstart && j < cend) {
3587           /* diagonal part of mat */
3588           idx[count]     = j;
3589           cmap1[count++] = i; /* column index in submat */
3590         } else if (Bn) {
3591           /* off-diagonal part of mat */
3592           if (j == garray[k]) {
3593             idx[count]     = j;
3594             cmap1[count++] = i; /* column index in submat */
3595           } else if (j > garray[k]) {
3596             while (j > garray[k] && k < Bn - 1) k++;
3597             if (j == garray[k]) {
3598               idx[count]     = j;
3599               cmap1[count++] = i; /* column index in submat */
3600             }
3601           }
3602         }
3603       }
3604       PetscCall(ISRestoreIndices(iscol_local, &is_idx));
3605 
3606       PetscCall(ISCreateGeneral(PETSC_COMM_SELF, count, idx, PETSC_OWN_POINTER, &iscol_sub));
3607       PetscCall(ISGetBlockSize(iscol, &cbs));
3608       PetscCall(ISSetBlockSize(iscol_sub, cbs));
3609 
3610       PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)iscol_local), count, cmap1, PETSC_OWN_POINTER, &iscmap));
3611     }
3612 
3613     /* (3) Create sequential Msub */
3614     PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol_sub, MAT_INITIAL_MATRIX, allcolumns, &Msub));
3615   }
3616 
3617   PetscCall(ISGetLocalSize(iscol_sub, &count));
3618   aij = (Mat_SeqAIJ *)(Msub)->data;
3619   ii  = aij->i;
3620   PetscCall(ISGetIndices(iscmap, &cmap));
3621 
3622   /*
3623       m - number of local rows
3624       Ncols - number of columns (same on all processors)
3625       rstart - first row in new global matrix generated
3626   */
3627   PetscCall(MatGetSize(Msub, &m, NULL));
3628 
3629   if (call == MAT_INITIAL_MATRIX) {
3630     /* (4) Create parallel newmat */
3631     PetscMPIInt rank, size;
3632     PetscInt    csize;
3633 
3634     PetscCallMPI(MPI_Comm_size(comm, &size));
3635     PetscCallMPI(MPI_Comm_rank(comm, &rank));
3636 
3637     /*
3638         Determine the number of non-zeros in the diagonal and off-diagonal
3639         portions of the matrix in order to do correct preallocation
3640     */
3641 
3642     /* first get start and end of "diagonal" columns */
3643     PetscCall(ISGetLocalSize(iscol, &csize));
3644     if (csize == PETSC_DECIDE) {
3645       PetscCall(ISGetSize(isrow, &mglobal));
3646       if (mglobal == Ncols) { /* square matrix */
3647         nlocal = m;
3648       } else {
3649         nlocal = Ncols / size + ((Ncols % size) > rank);
3650       }
3651     } else {
3652       nlocal = csize;
3653     }
3654     PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
3655     rstart = rend - nlocal;
3656     PetscCheck(rank != size - 1 || rend == Ncols, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, Ncols);
3657 
3658     /* next, compute all the lengths */
3659     jj = aij->j;
3660     PetscCall(PetscMalloc1(2 * m + 1, &dlens));
3661     olens = dlens + m;
3662     for (i = 0; i < m; i++) {
3663       jend = ii[i + 1] - ii[i];
3664       olen = 0;
3665       dlen = 0;
3666       for (j = 0; j < jend; j++) {
3667         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3668         else dlen++;
3669         jj++;
3670       }
3671       olens[i] = olen;
3672       dlens[i] = dlen;
3673     }
3674 
3675     PetscCall(ISGetBlockSize(isrow, &bs));
3676     PetscCall(ISGetBlockSize(iscol, &cbs));
3677 
3678     PetscCall(MatCreate(comm, &M));
3679     PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, Ncols));
3680     PetscCall(MatSetBlockSizes(M, bs, cbs));
3681     PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
3682     PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
3683     PetscCall(PetscFree(dlens));
3684 
3685   } else { /* call == MAT_REUSE_MATRIX */
3686     M = *newmat;
3687     PetscCall(MatGetLocalSize(M, &i, NULL));
3688     PetscCheck(i == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
3689     PetscCall(MatZeroEntries(M));
3690     /*
3691          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3692        rather than the slower MatSetValues().
3693     */
3694     M->was_assembled = PETSC_TRUE;
3695     M->assembled     = PETSC_FALSE;
3696   }
3697 
3698   /* (5) Set values of Msub to *newmat */
3699   PetscCall(PetscMalloc1(count, &colsub));
3700   PetscCall(MatGetOwnershipRange(M, &rstart, NULL));
3701 
3702   jj = aij->j;
3703   PetscCall(MatSeqAIJGetArrayRead(Msub, (const PetscScalar **)&aa));
3704   for (i = 0; i < m; i++) {
3705     row = rstart + i;
3706     nz  = ii[i + 1] - ii[i];
3707     for (j = 0; j < nz; j++) colsub[j] = cmap[jj[j]];
3708     PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, colsub, aa, INSERT_VALUES));
3709     jj += nz;
3710     aa += nz;
3711   }
3712   PetscCall(MatSeqAIJRestoreArrayRead(Msub, (const PetscScalar **)&aa));
3713   PetscCall(ISRestoreIndices(iscmap, &cmap));
3714 
3715   PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY));
3716   PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY));
3717 
3718   PetscCall(PetscFree(colsub));
3719 
3720   /* save Msub, iscol_sub and iscmap used in processor for next request */
3721   if (call == MAT_INITIAL_MATRIX) {
3722     *newmat = M;
3723     PetscCall(PetscObjectCompose((PetscObject)(*newmat), "SubMatrix", (PetscObject)Msub));
3724     PetscCall(MatDestroy(&Msub));
3725 
3726     PetscCall(PetscObjectCompose((PetscObject)(*newmat), "SubIScol", (PetscObject)iscol_sub));
3727     PetscCall(ISDestroy(&iscol_sub));
3728 
3729     PetscCall(PetscObjectCompose((PetscObject)(*newmat), "Subcmap", (PetscObject)iscmap));
3730     PetscCall(ISDestroy(&iscmap));
3731 
3732     if (iscol_local) {
3733       PetscCall(PetscObjectCompose((PetscObject)(*newmat), "ISAllGather", (PetscObject)iscol_local));
3734       PetscCall(ISDestroy(&iscol_local));
3735     }
3736   }
3737   PetscFunctionReturn(PETSC_SUCCESS);
3738 }
3739 
3740 /*
3741     Not great since it makes two copies of the submatrix: first a sequential AIJ matrix on each
3742   process, and then the final result obtained by concatenating those local matrices.
3743   Writing it directly would look much like MatCreateSubMatrices_MPIAIJ().
3744 
3745   This routine requires a sequential iscol that contains all the requested column indices.
3746 */
3747 PetscErrorCode MatCreateSubMatrix_MPIAIJ_nonscalable(Mat mat, IS isrow, IS iscol, PetscInt csize, MatReuse call, Mat *newmat)
3748 {
3749   PetscMPIInt rank, size;
3750   PetscInt    i, m, n, rstart, row, rend, nz, *cwork, j, bs, cbs;
3751   PetscInt   *ii, *jj, nlocal, *dlens, *olens, dlen, olen, jend, mglobal;
3752   Mat         M, Mreuse;
3753   MatScalar  *aa, *vwork;
3754   MPI_Comm    comm;
3755   Mat_SeqAIJ *aij;
3756   PetscBool   colflag, allcolumns = PETSC_FALSE;
3757 
3758   PetscFunctionBegin;
3759   PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
3760   PetscCallMPI(MPI_Comm_rank(comm, &rank));
3761   PetscCallMPI(MPI_Comm_size(comm, &size));
3762 
3763   /* Check for special case: each processor gets entire matrix columns */
3764   PetscCall(ISIdentity(iscol, &colflag));
3765   PetscCall(ISGetLocalSize(iscol, &n));
3766   if (colflag && n == mat->cmap->N) allcolumns = PETSC_TRUE;
3767   PetscCall(MPIU_Allreduce(MPI_IN_PLACE, &allcolumns, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
3768 
3769   if (call == MAT_REUSE_MATRIX) {
3770     PetscCall(PetscObjectQuery((PetscObject)*newmat, "SubMatrix", (PetscObject *)&Mreuse));
3771     PetscCheck(Mreuse, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Submatrix passed in was not used before, cannot reuse");
3772     PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_REUSE_MATRIX, allcolumns, &Mreuse));
3773   } else {
3774     PetscCall(MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, allcolumns, &Mreuse));
3775   }
3776 
3777   /*
3778       m - number of local rows
3779       n - number of columns (same on all processors)
3780       rstart - first row in new global matrix generated
3781   */
3782   PetscCall(MatGetSize(Mreuse, &m, &n));
3783   PetscCall(MatGetBlockSizes(Mreuse, &bs, &cbs));
3784   if (call == MAT_INITIAL_MATRIX) {
3785     aij = (Mat_SeqAIJ *)(Mreuse)->data;
3786     ii  = aij->i;
3787     jj  = aij->j;
3788 
3789     /*
3790         Determine the number of non-zeros in the diagonal and off-diagonal
3791         portions of the matrix in order to do correct preallocation
3792     */
3793 
3794     /* first get start and end of "diagonal" columns */
3795     if (csize == PETSC_DECIDE) {
3796       PetscCall(ISGetSize(isrow, &mglobal));
3797       if (mglobal == n) { /* square matrix */
3798         nlocal = m;
3799       } else {
3800         nlocal = n / size + ((n % size) > rank);
3801       }
3802     } else {
3803       nlocal = csize;
3804     }
3805     PetscCallMPI(MPI_Scan(&nlocal, &rend, 1, MPIU_INT, MPI_SUM, comm));
3806     rstart = rend - nlocal;
3807     PetscCheck(rank != size - 1 || rend == n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Local column sizes %" PetscInt_FMT " do not add up to total number of columns %" PetscInt_FMT, rend, n);
3808 
3809     /* next, compute all the lengths */
3810     PetscCall(PetscMalloc1(2 * m + 1, &dlens));
3811     olens = dlens + m;
3812     for (i = 0; i < m; i++) {
3813       jend = ii[i + 1] - ii[i];
3814       olen = 0;
3815       dlen = 0;
3816       for (j = 0; j < jend; j++) {
3817         if (*jj < rstart || *jj >= rend) olen++;
3818         else dlen++;
3819         jj++;
3820       }
3821       olens[i] = olen;
3822       dlens[i] = dlen;
3823     }
3824     PetscCall(MatCreate(comm, &M));
3825     PetscCall(MatSetSizes(M, m, nlocal, PETSC_DECIDE, n));
3826     PetscCall(MatSetBlockSizes(M, bs, cbs));
3827     PetscCall(MatSetType(M, ((PetscObject)mat)->type_name));
3828     PetscCall(MatMPIAIJSetPreallocation(M, 0, dlens, 0, olens));
3829     PetscCall(PetscFree(dlens));
3830   } else {
3831     PetscInt ml, nl;
3832 
3833     M = *newmat;
3834     PetscCall(MatGetLocalSize(M, &ml, &nl));
3835     PetscCheck(ml == m, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Previous matrix must be same size/layout as request");
3836     PetscCall(MatZeroEntries(M));
3837     /*
3838          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3839        rather than the slower MatSetValues().
3840     */
3841     M->was_assembled = PETSC_TRUE;
3842     M->assembled     = PETSC_FALSE;
3843   }
3844   PetscCall(MatGetOwnershipRange(M, &rstart, &rend));
3845   aij = (Mat_SeqAIJ *)(Mreuse)->data;
3846   ii  = aij->i;
3847   jj  = aij->j;
3848 
3849   /* trigger copy to CPU if needed */
3850   PetscCall(MatSeqAIJGetArrayRead(Mreuse, (const PetscScalar **)&aa));
3851   for (i = 0; i < m; i++) {
3852     row   = rstart + i;
3853     nz    = ii[i + 1] - ii[i];
3854     cwork = jj;
3855     jj += nz;
3856     vwork = aa;
3857     aa += nz;
3858     PetscCall(MatSetValues_MPIAIJ(M, 1, &row, nz, cwork, vwork, INSERT_VALUES));
3859   }
3860   PetscCall(MatSeqAIJRestoreArrayRead(Mreuse, (const PetscScalar **)&aa));
3861 
3862   PetscCall(MatAssemblyBegin(M, MAT_FINAL_ASSEMBLY));
3863   PetscCall(MatAssemblyEnd(M, MAT_FINAL_ASSEMBLY));
3864   *newmat = M;
3865 
3866   /* save submatrix used in processor for next request */
3867   if (call == MAT_INITIAL_MATRIX) {
3868     PetscCall(PetscObjectCompose((PetscObject)M, "SubMatrix", (PetscObject)Mreuse));
3869     PetscCall(MatDestroy(&Mreuse));
3870   }
3871   PetscFunctionReturn(PETSC_SUCCESS);
3872 }
3873 
3874 static PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
3875 {
3876   PetscInt        m, cstart, cend, j, nnz, i, d, *ld;
3877   PetscInt       *d_nnz, *o_nnz, nnz_max = 0, rstart, ii;
3878   const PetscInt *JJ;
3879   PetscBool       nooffprocentries;
3880   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)B->data;
3881 
3882   PetscFunctionBegin;
3883   PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Ii[0] must be 0 it is %" PetscInt_FMT, Ii[0]);
3884 
3885   PetscCall(PetscLayoutSetUp(B->rmap));
3886   PetscCall(PetscLayoutSetUp(B->cmap));
3887   m      = B->rmap->n;
3888   cstart = B->cmap->rstart;
3889   cend   = B->cmap->rend;
3890   rstart = B->rmap->rstart;
3891 
3892   PetscCall(PetscCalloc2(m, &d_nnz, m, &o_nnz));
3893 
3894   if (PetscDefined(USE_DEBUG)) {
3895     for (i = 0; i < m; i++) {
3896       nnz = Ii[i + 1] - Ii[i];
3897       JJ  = J ? J + Ii[i] : NULL;
3898       PetscCheck(nnz >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Local row %" PetscInt_FMT " has a negative %" PetscInt_FMT " number of columns", i, nnz);
3899       PetscCheck(!nnz || !(JJ[0] < 0), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " starts with negative column index %" PetscInt_FMT, i, JJ[0]);
3900       PetscCheck(!nnz || !(JJ[nnz - 1] >= B->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Row %" PetscInt_FMT " ends with too large a column index %" PetscInt_FMT " (max allowed %" PetscInt_FMT ")", i, JJ[nnz - 1], B->cmap->N);
3901     }
3902   }
3903 
3904   for (i = 0; i < m; i++) {
3905     nnz     = Ii[i + 1] - Ii[i];
3906     JJ      = J ? J + Ii[i] : NULL;
3907     nnz_max = PetscMax(nnz_max, nnz);
3908     d       = 0;
3909     for (j = 0; j < nnz; j++) {
3910       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3911     }
3912     d_nnz[i] = d;
3913     o_nnz[i] = nnz - d;
3914   }
3915   PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
3916   PetscCall(PetscFree2(d_nnz, o_nnz));
3917 
3918   for (i = 0; i < m; i++) {
3919     ii = i + rstart;
3920     PetscCall(MatSetValues_MPIAIJ(B, 1, &ii, Ii[i + 1] - Ii[i], J ? J + Ii[i] : NULL, v ? v + Ii[i] : NULL, INSERT_VALUES));
3921   }
3922   nooffprocentries    = B->nooffprocentries;
3923   B->nooffprocentries = PETSC_TRUE;
3924   PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
3925   PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
3926   B->nooffprocentries = nooffprocentries;
3927 
3928   /* count number of entries below block diagonal */
3929   PetscCall(PetscFree(Aij->ld));
3930   PetscCall(PetscCalloc1(m, &ld));
3931   Aij->ld = ld;
3932   for (i = 0; i < m; i++) {
3933     nnz = Ii[i + 1] - Ii[i];
3934     j   = 0;
3935     while (j < nnz && J[j] < cstart) j++;
3936     ld[i] = j;
3937     if (J) J += nnz;
3938   }
3939 
3940   PetscCall(MatSetOption(B, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
3941   PetscFunctionReturn(PETSC_SUCCESS);
3942 }
3943 
3944 /*@
3945   MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in `MATAIJ` format
3946   (the default parallel PETSc format).
3947 
3948   Collective
3949 
3950   Input Parameters:
3951 + B - the matrix
3952 . i - the indices into j for the start of each local row (starts with zero)
3953 . j - the column indices for each local row (starts with zero)
3954 - v - optional values in the matrix
3955 
3956   Level: developer
3957 
3958   Notes:
3959   The `i`, `j`, and `v` arrays ARE copied by this routine into the internal format used by PETSc;
3960   thus you CANNOT change the matrix entries by changing the values of `v` after you have
3961   called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.
3962 
3963   The `i` and `j` indices are 0 based, and the `i` indices are offsets into the local `j` array.
3964 
3965   The format used for the sparse matrix input is equivalent to a
3966   row-major ordering, i.e., for the following matrix, the input data expected is
3967   as shown
3968 
3969 .vb
3970         1 0 0
3971         2 0 3     P0
3972        -------
3973         4 5 6     P1
3974 
3975      Process0 [P0] rows_owned=[0,1]
3976         i =  {0,1,3}  [size = nrow+1  = 2+1]
3977         j =  {0,0,2}  [size = 3]
3978         v =  {1,2,3}  [size = 3]
3979 
3980      Process1 [P1] rows_owned=[2]
3981         i =  {0,3}    [size = nrow+1  = 1+1]
3982         j =  {0,1,2}  [size = 3]
3983         v =  {4,5,6}  [size = 3]
3984 .ve
3985 
3986 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatCreateAIJ()`,
3987           `MatCreateSeqAIJWithArrays()`, `MatCreateMPIAIJWithSplitArrays()`
3988 @*/
3989 PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B, const PetscInt i[], const PetscInt j[], const PetscScalar v[])
3990 {
3991   PetscFunctionBegin;
3992   PetscTryMethod(B, "MatMPIAIJSetPreallocationCSR_C", (Mat, const PetscInt[], const PetscInt[], const PetscScalar[]), (B, i, j, v));
3993   PetscFunctionReturn(PETSC_SUCCESS);
3994 }
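/*
   A minimal usage sketch (not part of the library source) driving MatMPIAIJSetPreallocationCSR() with
   the two-process 3x3 example from the manual page above; the function name and the assumption of
   exactly two MPI ranks are illustrative only.

     static PetscErrorCode ExampleSetCSR(MPI_Comm comm, Mat *A)
     {
       PetscMPIInt rank;
       // local CSR triplets copied from the manual page example (rank 0 owns rows 0-1, rank 1 owns row 2)
       const PetscInt    i0[] = {0, 1, 3}, j0[] = {0, 0, 2};
       const PetscScalar v0[] = {1, 2, 3};
       const PetscInt    i1[] = {0, 3}, j1[] = {0, 1, 2};
       const PetscScalar v1[] = {4, 5, 6};

       PetscFunctionBeginUser;
       PetscCallMPI(MPI_Comm_rank(comm, &rank));
       PetscCall(MatCreate(comm, A));
       PetscCall(MatSetSizes(*A, rank ? 1 : 2, PETSC_DECIDE, 3, 3));
       PetscCall(MatSetType(*A, MATMPIAIJ));
       PetscCall(MatMPIAIJSetPreallocationCSR(*A, rank ? i1 : i0, rank ? j1 : j0, rank ? v1 : v0));
       PetscFunctionReturn(PETSC_SUCCESS);
     }
*/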
3995 
3996 /*@C
3997   MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in `MATMPIAIJ` format
3998   (the default parallel PETSc format).  For good matrix assembly performance
3999   the user should preallocate the matrix storage by setting the parameters
4000   `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
4001 
4002   Collective
4003 
4004   Input Parameters:
4005 + B     - the matrix
4006 . d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4007            (same value is used for all local rows)
4008 . d_nnz - array containing the number of nonzeros in the various rows of the
4009            DIAGONAL portion of the local submatrix (possibly different for each row)
4010            or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
4011            The size of this array is equal to the number of local rows, i.e 'm'.
4012            For matrices that will be factored, you must leave room for (and set)
4013            the diagonal entry even if it is zero.
4014 . o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4015            submatrix (same value is used for all local rows).
4016 - o_nnz - array containing the number of nonzeros in the various rows of the
4017            OFF-DIAGONAL portion of the local submatrix (possibly different for
4018            each row) or `NULL` (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
4019            structure. The size of this array is equal to the number
4020            of local rows, i.e 'm'.
4021 
4022   Example Usage:
4023   Consider the following 8x8 matrix with 34 non-zero values, that is
4024   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4025   proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4026   as follows
4027 
4028 .vb
4029             1  2  0  |  0  3  0  |  0  4
4030     Proc0   0  5  6  |  7  0  0  |  8  0
4031             9  0 10  | 11  0  0  | 12  0
4032     -------------------------------------
4033            13  0 14  | 15 16 17  |  0  0
4034     Proc1   0 18  0  | 19 20 21  |  0  0
4035             0  0  0  | 22 23  0  | 24  0
4036     -------------------------------------
4037     Proc2  25 26 27  |  0  0 28  | 29  0
4038            30  0  0  | 31 32 33  |  0 34
4039 .ve
4040 
4041   This can be represented as a collection of submatrices as
4042 .vb
4043       A B C
4044       D E F
4045       G H I
4046 .ve
4047 
4048   Where the submatrices A,B,C are owned by proc0, D,E,F are
4049   owned by proc1, G,H,I are owned by proc2.
4050 
4051   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4052   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4053   The 'M','N' parameters are 8,8, and have the same values on all procs.
4054 
4055   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4056   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4057   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4058   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4059   part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ`
4060   matrix, and [DF] as another `MATSEQAIJ` matrix.
4061 
4062   When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
4063   allocated for every row of the local diagonal submatrix, and `o_nz`
4064   storage locations are allocated for every row of the OFF-DIAGONAL submat.
4065   One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros per local
4066   row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4067   In this case, the values of `d_nz`, `o_nz` are
4068 .vb
4069      proc0  dnz = 2, o_nz = 2
4070      proc1  dnz = 3, o_nz = 2
4071      proc2  dnz = 1, o_nz = 4
4072 .ve
4073   We are allocating `m`*(`d_nz`+`o_nz`) storage locations for every proc. This
4074   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4075   for proc2, i.e., we are using 12+15+10=37 storage locations to store
4076   34 values.
4077 
4078   When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
4079   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4080   In the above case the values for `d_nnz`, `o_nnz` are
4081 .vb
4082      proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
4083      proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
4084      proc2 d_nnz = [1,1]   and o_nnz = [4,4]
4085 .ve
4086   Here the space allocated is sum of all the above values i.e 34, and
4087   hence pre-allocation is perfect.
4088 
4089   Level: intermediate
4090 
4091   Notes:
4092   If the *_nnz parameter is given then the *_nz parameter is ignored
4093 
4094   The `MATAIJ` format, also called compressed row storage (CSR), is compatible with standard Fortran
4095   storage.  The stored row and column indices begin with zero.
4096   See [Sparse Matrices](sec_matsparse) for details.
4097 
4098   The parallel matrix is partitioned such that the first m0 rows belong to
4099   process 0, the next m1 rows belong to process 1, the next m2 rows belong
4100   to process 2 etc., where m0,m1,m2... are the input parameter 'm'.
4101 
4102   The DIAGONAL portion of the local submatrix of a processor can be defined
4103   as the submatrix which is obtained by extracting the part corresponding to
4104   the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
4105   first row that belongs to the processor, r2 is the last row belonging to
4106   this processor, and c1-c2 is the range of indices of the local part of a
4107   vector suitable for applying the matrix to.  This is an mxn matrix.  In the
4108   common case of a square matrix, the row and column ranges are the same and
4109   the DIAGONAL part is also square. The remaining portion of the local
4110   submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.
4111 
4112   If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.
4113 
4114   You can call `MatGetInfo()` to get information on how effective the preallocation was;
4115   for example, the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
4116   You can also run with the option `-info` and look for messages with the string
4117   malloc in them to see if additional memory allocation was needed.
4118 
4119 .seealso: [](ch_matrices), `Mat`, [Sparse Matrices](sec_matsparse), `MATMPIAIJ`, `MATAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatCreateAIJ()`, `MatMPIAIJSetPreallocationCSR()`,
4120           `MatGetInfo()`, `PetscSplitOwnership()`
4121 @*/
4122 PetscErrorCode MatMPIAIJSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
4123 {
4124   PetscFunctionBegin;
4125   PetscValidHeaderSpecific(B, MAT_CLASSID, 1);
4126   PetscValidType(B, 1);
4127   PetscTryMethod(B, "MatMPIAIJSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
4128   PetscFunctionReturn(PETSC_SUCCESS);
4129 }
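/*
   A small sketch (not part of the library source) of the preallocation call as it would look on the
   rank owning the first three rows of the 8x8 example in the manual page above; the assembly loop is
   elided and the variable names are illustrative.

     Mat      B;
     PetscInt d_nnz[3] = {2, 2, 2}, o_nnz[3] = {2, 2, 2}; // exact per-row counts for proc0 in the example

     PetscCall(MatCreate(PETSC_COMM_WORLD, &B));
     PetscCall(MatSetSizes(B, 3, 3, 8, 8)); // m = n = 3 locally, M = N = 8 globally
     PetscCall(MatSetType(B, MATMPIAIJ));
     PetscCall(MatMPIAIJSetPreallocation(B, 0, d_nnz, 0, o_nnz));
     // ... MatSetValues() for the owned rows, then MatAssemblyBegin()/MatAssemblyEnd() ...
*/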
4130 
4131 /*@
4132   MatCreateMPIAIJWithArrays - creates a `MATMPIAIJ` matrix using arrays that contain the local
4133   rows in standard CSR format.
4134 
4135   Collective
4136 
4137   Input Parameters:
4138 + comm - MPI communicator
4139 . m    - number of local rows (Cannot be `PETSC_DECIDE`)
4140 . n    - This value should be the same as the local size used in creating the
4141        x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
4142        calculated if N is given) For square matrices n is almost always m.
4143 . M    - number of global rows (or `PETSC_DETERMINE` to have calculated if m is given)
4144 . N    - number of global columns (or `PETSC_DETERMINE` to have calculated if n is given)
4145 . i    - row indices; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
4146 . j    - column indices
4147 - a    - optional matrix values
4148 
4149   Output Parameter:
4150 . mat - the matrix
4151 
4152   Level: intermediate
4153 
4154   Notes:
4155   The `i`, `j`, and `a` arrays ARE copied by this routine into the internal format used by PETSc;
4156   thus you CANNOT change the matrix entries by changing the values of a[] after you have
4157   called this routine. Use `MatCreateMPIAIJWithSplitArrays()` to avoid needing to copy the arrays.
4158 
4159   The `i` and `j` indices are 0 based, and `i` indices are indices corresponding to the local `j` array.
4160 
4161   The format used for the sparse matrix input is equivalent to a
4162   row-major ordering, i.e., for the following matrix, the input data expected is
4163   as shown below.
4164 
4165   Once you have created the matrix you can update it with new numerical values using `MatUpdateMPIAIJWithArrays()`
4166 .vb
4167         1 0 0
4168         2 0 3     P0
4169        -------
4170         4 5 6     P1
4171 
4172      Process0 [P0] rows_owned=[0,1]
4173         i =  {0,1,3}  [size = nrow+1  = 2+1]
4174         j =  {0,0,2}  [size = 3]
4175         v =  {1,2,3}  [size = 3]
4176 
4177      Process1 [P1] rows_owned=[2]
4178         i =  {0,3}    [size = nrow+1  = 1+1]
4179         j =  {0,1,2}  [size = 3]
4180         v =  {4,5,6}  [size = 3]
4181 .ve
4182 
4183 .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4184           `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArrays()`
4185 @*/
4186 PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt i[], const PetscInt j[], const PetscScalar a[], Mat *mat)
4187 {
4188   PetscFunctionBegin;
4189   PetscCheck(!i || !i[0], PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
4190   PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
4191   PetscCall(MatCreate(comm, mat));
4192   PetscCall(MatSetSizes(*mat, m, n, M, N));
4193   /* PetscCall(MatSetBlockSizes(M,bs,cbs)); */
4194   PetscCall(MatSetType(*mat, MATMPIAIJ));
4195   PetscCall(MatMPIAIJSetPreallocationCSR(*mat, i, j, a));
4196   PetscFunctionReturn(PETSC_SUCCESS);
4197 }
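/*
   A minimal sketch (not part of the library source) of the single-call construction from local CSR
   arrays. The triplets are the rank-0 arrays of the two-process example in the manual page above, so
   the assumption that this rank owns two of the three global rows is purely illustrative.

     Mat               A;
     const PetscInt    ia[] = {0, 1, 3}, ja[] = {0, 0, 2};
     const PetscScalar va[] = {1, 2, 3};

     PetscCall(MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD, 2, PETSC_DECIDE, 3, 3, ia, ja, va, &A));
     // the arrays are copied, so ia, ja, and va may be freed or reused immediately afterwards
*/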
4198 
4199 /*@
4200   MatUpdateMPIAIJWithArrays - updates a `MATMPIAIJ` matrix using arrays that contain the local
4201   rows in standard CSR format. Only the numerical values are updated; the other arrays must be identical to what was passed
4202   to `MatCreateMPIAIJWithArrays()`
4203 
4204   Deprecated: Use `MatUpdateMPIAIJWithArray()`
4205 
4206   Collective
4207 
4208   Input Parameters:
4209 + mat - the matrix
4210 . m   - number of local rows (Cannot be `PETSC_DECIDE`)
4211 . n   - This value should be the same as the local size used in creating the
4212        x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
4213        calculated if N is given) For square matrices n is almost always m.
4214 . M   - number of global rows (or `PETSC_DETERMINE` to have calculated if m is given)
4215 . N   - number of global columns (or `PETSC_DETERMINE` to have calculated if n is given)
4216 . Ii  - row indices; that is Ii[0] = 0, Ii[row] = Ii[row-1] + number of elements in that row of the matrix
4217 . J   - column indices
4218 - v   - matrix values
4219 
4220   Level: deprecated
4221 
4222 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4223           `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArray()`
4224 @*/
4225 PetscErrorCode MatUpdateMPIAIJWithArrays(Mat mat, PetscInt m, PetscInt n, PetscInt M, PetscInt N, const PetscInt Ii[], const PetscInt J[], const PetscScalar v[])
4226 {
4227   PetscInt        nnz, i;
4228   PetscBool       nooffprocentries;
4229   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)mat->data;
4230   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ *)Aij->A->data;
4231   PetscScalar    *ad, *ao;
4232   PetscInt        ldi, Iii, md;
4233   const PetscInt *Adi = Ad->i;
4234   PetscInt       *ld  = Aij->ld;
4235 
4236   PetscFunctionBegin;
4237   PetscCheck(Ii[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
4238   PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
4239   PetscCheck(m == mat->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of rows cannot change from call to MatUpdateMPIAIJWithArrays()");
4240   PetscCheck(n == mat->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Local number of columns cannot change from call to MatUpdateMPIAIJWithArrays()");
4241 
4242   PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
4243   PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));
4244 
4245   for (i = 0; i < m; i++) {
4246     nnz = Ii[i + 1] - Ii[i];
4247     Iii = Ii[i];
4248     ldi = ld[i];
4249     md  = Adi[i + 1] - Adi[i];
4250     PetscCall(PetscArraycpy(ao, v + Iii, ldi));
4251     PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
4252     PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
4253     ad += md;
4254     ao += nnz - md;
4255   }
4256   nooffprocentries      = mat->nooffprocentries;
4257   mat->nooffprocentries = PETSC_TRUE;
4258   PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
4259   PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
4260   PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
4261   PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
4262   PetscCall(PetscObjectStateIncrease((PetscObject)mat));
4263   PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
4264   PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
4265   mat->nooffprocentries = nooffprocentries;
4266   PetscFunctionReturn(PETSC_SUCCESS);
4267 }
4268 
4269 /*@
4270   MatUpdateMPIAIJWithArray - updates a `MATMPIAIJ` matrix using an array that contains the nonzero values
4271 
4272   Collective
4273 
4274   Input Parameters:
4275 + mat - the matrix
4276 - v   - matrix values, stored by row
4277 
4278   Level: intermediate
4279 
4280   Note:
4281   The matrix must have been obtained with `MatCreateMPIAIJWithArrays()` or `MatMPIAIJSetPreallocationCSR()`
4282 
4283 .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4284           `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithSplitArrays()`, `MatUpdateMPIAIJWithArrays()`
4285 @*/
4286 PetscErrorCode MatUpdateMPIAIJWithArray(Mat mat, const PetscScalar v[])
4287 {
4288   PetscInt        nnz, i, m;
4289   PetscBool       nooffprocentries;
4290   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *)mat->data;
4291   Mat_SeqAIJ     *Ad  = (Mat_SeqAIJ *)Aij->A->data;
4292   Mat_SeqAIJ     *Ao  = (Mat_SeqAIJ *)Aij->B->data;
4293   PetscScalar    *ad, *ao;
4294   const PetscInt *Adi = Ad->i, *Adj = Ao->i;
4295   PetscInt        ldi, Iii, md;
4296   PetscInt       *ld = Aij->ld;
4297 
4298   PetscFunctionBegin;
4299   m = mat->rmap->n;
4300 
4301   PetscCall(MatSeqAIJGetArrayWrite(Aij->A, &ad));
4302   PetscCall(MatSeqAIJGetArrayWrite(Aij->B, &ao));
4303   Iii = 0;
4304   for (i = 0; i < m; i++) {
4305     nnz = Adi[i + 1] - Adi[i] + Adj[i + 1] - Adj[i];
4306     ldi = ld[i];
4307     md  = Adi[i + 1] - Adi[i];
4308     PetscCall(PetscArraycpy(ao, v + Iii, ldi));
4309     PetscCall(PetscArraycpy(ad, v + Iii + ldi, md));
4310     PetscCall(PetscArraycpy(ao + ldi, v + Iii + ldi + md, nnz - ldi - md));
4311     ad += md;
4312     ao += nnz - md;
4313     Iii += nnz;
4314   }
4315   nooffprocentries      = mat->nooffprocentries;
4316   mat->nooffprocentries = PETSC_TRUE;
4317   PetscCall(MatSeqAIJRestoreArrayWrite(Aij->A, &ad));
4318   PetscCall(MatSeqAIJRestoreArrayWrite(Aij->B, &ao));
4319   PetscCall(PetscObjectStateIncrease((PetscObject)Aij->A));
4320   PetscCall(PetscObjectStateIncrease((PetscObject)Aij->B));
4321   PetscCall(PetscObjectStateIncrease((PetscObject)mat));
4322   PetscCall(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
4323   PetscCall(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));
4324   mat->nooffprocentries = nooffprocentries;
4325   PetscFunctionReturn(PETSC_SUCCESS);
4326 }
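/*
   A short sketch (not part of the library source): once a matrix A has been built with
   MatCreateMPIAIJWithArrays() or MatMPIAIJSetPreallocationCSR(), a change of numerical values needs
   only the new value array, in the same row-by-row order and length as the original one. `A` and
   `vnew` are illustrative names.

     const PetscScalar vnew[] = {10, 20, 30}; // replacement values for this rank's local rows

     PetscCall(MatUpdateMPIAIJWithArray(A, vnew));
     // the nonzero pattern of A is unchanged; only the stored values are overwritten
*/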
4327 
4328 /*@C
4329   MatCreateAIJ - Creates a sparse parallel matrix in `MATAIJ` format
4330   (the default parallel PETSc format).  For good matrix assembly performance
4331   the user should preallocate the matrix storage by setting the parameters
4332   `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
4333 
4334   Collective
4335 
4336   Input Parameters:
4337 + comm  - MPI communicator
4338 . m     - number of local rows (or `PETSC_DECIDE` to have calculated if M is given)
4339            This value should be the same as the local size used in creating the
4340            y vector for the matrix-vector product y = Ax.
4341 . n     - This value should be the same as the local size used in creating the
4342        x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
4343        calculated if N is given) For square matrices n is almost always m.
4344 . M     - number of global rows (or `PETSC_DETERMINE` to have calculated if m is given)
4345 . N     - number of global columns (or `PETSC_DETERMINE` to have calculated if n is given)
4346 . d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
4347            (same value is used for all local rows)
4348 . d_nnz - array containing the number of nonzeros in the various rows of the
4349            DIAGONAL portion of the local submatrix (possibly different for each row)
4350            or `NULL`, if `d_nz` is used to specify the nonzero structure.
4351            The size of this array is equal to the number of local rows, i.e 'm'.
4352 . o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
4353            submatrix (same value is used for all local rows).
4354 - o_nnz - array containing the number of nonzeros in the various rows of the
4355            OFF-DIAGONAL portion of the local submatrix (possibly different for
4356            each row) or `NULL`, if `o_nz` is used to specify the nonzero
4357            structure. The size of this array is equal to the number
4358            of local rows, i.e 'm'.
4359 
4360   Output Parameter:
4361 . A - the matrix
4362 
4363   Options Database Keys:
4364 + -mat_no_inode                     - Do not use inodes
4365 . -mat_inode_limit <limit>          - Sets inode limit (max limit=5)
4366 - -matmult_vecscatter_view <viewer> - View the vecscatter (i.e., communication pattern) used in `MatMult()` of sparse parallel matrices.
4367         See viewer types in the manual page of `MatView()`. Of them, ascii_matlab, draw or binary cause the vecscatter to be viewed as a matrix.
4368         Entry (i,j) is the size of message (in bytes) rank i sends to rank j in one `MatMult()` call.
4369 
4370   Level: intermediate
4371 
4372   Notes:
4373   It is recommended that one use `MatCreateFromOptions()` or the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
4374   MatXXXXSetPreallocation() paradigm instead of this routine directly.
4375   [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`]
4376 
4377   If the *_nnz parameter is given then the *_nz parameter is ignored
4378 
4379   The `m`,`n`,`M`,`N` parameters specify the size of the matrix, and its partitioning across
4380   processors, while `d_nz`,`d_nnz`,`o_nz`,`o_nnz` parameters specify the approximate
4381   storage requirements for this matrix.
4382 
4383   If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
4384   processor then it must be used on all processors that share the object for
4385   that argument.
4386 
4387   The user MUST specify either the local or global matrix dimensions
4388   (possibly both).
4389 
4390   The parallel matrix is partitioned across processors such that the
4391   first m0 rows belong to process 0, the next m1 rows belong to
4392   process 1, the next m2 rows belong to process 2 etc., where
4393   m0,m1,m2,.. are the input parameter 'm', i.e., each processor stores
4394   values corresponding to an [m x N] submatrix.
4395 
4396   The columns are logically partitioned with the n0 columns belonging
4397   to 0th partition, the next n1 columns belonging to the next
4398   partition etc.. where n0,n1,n2... are the input parameter 'n'.
4399 
4400   The DIAGONAL portion of the local submatrix on any given processor
4401   is the submatrix corresponding to the rows and columns m,n
4402   owned by the given processor, i.e., the diagonal matrix on
4403   process 0 is [m0 x n0], diagonal matrix on process 1 is [m1 x n1]
4404   etc. The remaining portion of the local submatrix [m x (N-n)]
4405   constitute the OFF-DIAGONAL portion. The example below better
4406   illustrates this concept.
4407 
4408   For a square global matrix we define each processor's diagonal portion
4409   to be its local rows and the corresponding columns (a square submatrix);
4410   each processor's off-diagonal portion encompasses the remainder of the
4411   local matrix (a rectangular submatrix).
4412 
4413   If `o_nnz`, `d_nnz` are specified, then `o_nz`, and `d_nz` are ignored.
4414 
4415   When calling this routine with a single process communicator, a matrix of
4416   type `MATSEQAIJ` is returned.  If a matrix of type `MATMPIAIJ` is desired for this
4417   type of communicator, use the construction mechanism
4418 .vb
4419   MatCreate(..., &A);
4420   MatSetType(A, MATMPIAIJ);
4421   MatSetSizes(A, m, n, M, N);
4422   MatMPIAIJSetPreallocation(A, ...);
4423 .ve
4424 
4425   By default, this format uses inodes (identical nodes) when possible.
4426   We search for consecutive rows with the same nonzero structure, thereby
4427   reusing matrix information to achieve increased efficiency.
4428 
4429   Example Usage:
4430   Consider the following 8x8 matrix with 34 non-zero values, that is
4431   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
4432   proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
4433   as follows
4434 
4435 .vb
4436             1  2  0  |  0  3  0  |  0  4
4437     Proc0   0  5  6  |  7  0  0  |  8  0
4438             9  0 10  | 11  0  0  | 12  0
4439     -------------------------------------
4440            13  0 14  | 15 16 17  |  0  0
4441     Proc1   0 18  0  | 19 20 21  |  0  0
4442             0  0  0  | 22 23  0  | 24  0
4443     -------------------------------------
4444     Proc2  25 26 27  |  0  0 28  | 29  0
4445            30  0  0  | 31 32 33  |  0 34
4446 .ve
4447 
4448   This can be represented as a collection of submatrices as
4449 
4450 .vb
4451       A B C
4452       D E F
4453       G H I
4454 .ve
4455 
4456   Where the submatrices A,B,C are owned by proc0, D,E,F are
4457   owned by proc1, G,H,I are owned by proc2.
4458 
4459   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4460   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4461   The 'M','N' parameters are 8,8, and have the same values on all procs.
4462 
4463   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4464   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4465   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4466   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4467   part as `MATSEQAIJ` matrices. For example, proc1 will store [E] as a `MATSEQAIJ`
4468   matrix, and [DF] as another `MATSEQAIJ` matrix.
4469 
4470   When `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
4471   allocated for every row of the local diagonal submatrix, and `o_nz`
4472   storage locations are allocated for every row of the OFF-DIAGONAL submat.
4473   One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros per local
4474   row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
4475   In this case, the values of `d_nz`,`o_nz` are
4476 .vb
4477      proc0  dnz = 2, o_nz = 2
4478      proc1  dnz = 3, o_nz = 2
4479      proc2  dnz = 1, o_nz = 4
4480 .ve
4481   We are allocating m*(`d_nz`+`o_nz`) storage locations for every proc. This
4482   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
4483   for proc2, i.e., we are using 12+15+10=37 storage locations to store
4484   34 values.
4485 
4486   When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
4487   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
4488   In the above case the values for `d_nnz`, `o_nnz` are
4489 .vb
4490      proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
4491      proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
4492      proc2 d_nnz = [1,1]   and o_nnz = [4,4]
4493 .ve
4494   Here the space allocated is sum of all the above values i.e 34, and
4495   hence pre-allocation is perfect.
4496 
4497 .seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
4498           `MATMPIAIJ`, `MatCreateMPIAIJWithArrays()`
4499 @*/
4500 PetscErrorCode MatCreateAIJ(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
4501 {
4502   PetscMPIInt size;
4503 
4504   PetscFunctionBegin;
4505   PetscCall(MatCreate(comm, A));
4506   PetscCall(MatSetSizes(*A, m, n, M, N));
4507   PetscCallMPI(MPI_Comm_size(comm, &size));
4508   if (size > 1) {
4509     PetscCall(MatSetType(*A, MATMPIAIJ));
4510     PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
4511   } else {
4512     PetscCall(MatSetType(*A, MATSEQAIJ));
4513     PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
4514   }
4515   PetscFunctionReturn(PETSC_SUCCESS);
4516 }
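/*
   A compact sketch (not part of the library source) of the one-call creation path with a constant
   per-row bound of 5 nonzeros in both the diagonal and off-diagonal blocks; the 100x100 size and the
   bound are illustrative assumptions, not recommendations.

     Mat      A;
     PetscInt rstart, rend;

     PetscCall(MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, 100, 100, 5, NULL, 5, NULL, &A));
     PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
     // ... call MatSetValues() for rows in [rstart, rend) ...
     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
*/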
4517 
4518 /*MC
4519     MatMPIAIJGetSeqAIJF90 - Returns the local pieces of this distributed matrix
4520 
4521     Synopsis:
4522     MatMPIAIJGetSeqAIJF90(Mat A, Mat Ad, Mat Ao, {PetscInt, pointer :: colmap(:)},integer ierr)
4523 
4524     Not Collective
4525 
4526     Input Parameter:
4527 .   A - the `MATMPIAIJ` matrix
4528 
4529     Output Parameters:
4530 +   Ad - the diagonal portion of the matrix
4531 .   Ao - the off-diagonal portion of the matrix
4532 .   colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
4533 -   ierr - error code
4534 
4535      Level: advanced
4536 
4537     Note:
4538     Use  `MatMPIAIJRestoreSeqAIJF90()` when you no longer need access to the matrices and `colmap`
4539 
4540 .seealso: [](ch_matrices), `Mat`, [](sec_fortranarrays), `MATMPIAIJ`, `MatMPIAIJGetSeqAIJ()`, `MatMPIAIJRestoreSeqAIJF90()`
4541 M*/
4542 
4543 /*MC
4544     MatMPIAIJRestoreSeqAIJF90 - call after `MatMPIAIJGetSeqAIJF90()` when you no longer need access to the matrices and `colmap`
4545 
4546     Synopsis:
4547     MatMPIAIJRestoreSeqAIJF90(Mat A, Mat Ad, Mat Ao, {PetscInt, pointer :: colmap(:)},integer ierr)
4548 
4549     Not Collective
4550 
4551     Input Parameters:
4552 +   A - the `MATMPIAIJ` matrix
4553 .   Ad - the diagonal portion of the matrix
4554 .   Ao - the off-diagonal portion of the matrix
4555 .   colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
4556 -   ierr - error code
4557 
4558      Level: advanced
4559 
4560 .seealso: [](ch_matrices), `Mat`, [](sec_fortranarrays), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetSeqAIJ()`, `MatMPIAIJGetSeqAIJF90()`
4561 M*/
4562 
4563 /*@C
4564   MatMPIAIJGetSeqAIJ - Returns the local pieces of this distributed matrix
4565 
4566   Not Collective
4567 
4568   Input Parameter:
4569 . A - The `MATMPIAIJ` matrix
4570 
4571   Output Parameters:
4572 + Ad     - The local diagonal block as a `MATSEQAIJ` matrix
4573 . Ao     - The local off-diagonal block as a `MATSEQAIJ` matrix
4574 - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
4575 
4576   Level: intermediate
4577 
4578   Note:
4579   The rows in `Ad` and `Ao` are in [0, Nr), where Nr is the number of local rows on this process. The columns
4580   in `Ad` are in [0, Nc) where Nc is the number of local columns. The columns are `Ao` are in [0, Nco), where Nco is
4581   the number of nonzero columns in the local off-diagonal piece of the matrix `A`. The array colmap maps these
4582   local column numbers to global column numbers in the original matrix.
4583 
4584   Fortran Notes:
4585   `MatMPIAIJGetSeqAIJ()` Fortran binding is deprecated (since PETSc 3.19), use `MatMPIAIJGetSeqAIJF90()`
4586 
4587 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatMPIAIJGetSeqAIJF90()`, `MatMPIAIJRestoreSeqAIJF90()`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()`, `MatCreateAIJ()`, `MATSEQAIJ`
4588 @*/
4589 PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
4590 {
4591   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
4592   PetscBool   flg;
4593 
4594   PetscFunctionBegin;
4595   PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &flg));
4596   PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPIAIJ matrix as input");
4597   if (Ad) *Ad = a->A;
4598   if (Ao) *Ao = a->B;
4599   if (colmap) *colmap = a->garray;
4600   PetscFunctionReturn(PETSC_SUCCESS);
4601 }
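/*
   A small sketch (not part of the library source) of inspecting the two local blocks returned by
   MatMPIAIJGetSeqAIJ(); `A` is assumed to be an existing assembled MATMPIAIJ matrix and the variable
   names are illustrative. The colmap array translates a local column index of Ao into a global column
   index of A.

     Mat             Ad, Ao;
     const PetscInt *colmap;
     PetscInt        nco;

     PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &colmap));
     PetscCall(MatGetSize(Ao, NULL, &nco)); // number of nonzero off-diagonal columns on this process
     for (PetscInt c = 0; c < nco; c++) PetscCall(PetscPrintf(PETSC_COMM_SELF, "local off-diagonal column %" PetscInt_FMT " is global column %" PetscInt_FMT "\n", c, colmap[c]));
     // Ad, Ao, and colmap are borrowed references; do not destroy or free them
*/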
4602 
4603 PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm, Mat inmat, PetscInt n, MatReuse scall, Mat *outmat)
4604 {
4605   PetscInt     m, N, i, rstart, nnz, Ii;
4606   PetscInt    *indx;
4607   PetscScalar *values;
4608   MatType      rootType;
4609 
4610   PetscFunctionBegin;
4611   PetscCall(MatGetSize(inmat, &m, &N));
4612   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4613     PetscInt *dnz, *onz, sum, bs, cbs;
4614 
4615     if (n == PETSC_DECIDE) PetscCall(PetscSplitOwnership(comm, &n, &N));
4616     /* Check sum(n) = N */
4617     PetscCall(MPIU_Allreduce(&n, &sum, 1, MPIU_INT, MPI_SUM, comm));
4618     PetscCheck(sum == N, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Sum of local columns %" PetscInt_FMT " != global columns %" PetscInt_FMT, sum, N);
4619 
4620     PetscCallMPI(MPI_Scan(&m, &rstart, 1, MPIU_INT, MPI_SUM, comm));
4621     rstart -= m;
4622 
4623     MatPreallocateBegin(comm, m, n, dnz, onz);
4624     for (i = 0; i < m; i++) {
4625       PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, NULL));
4626       PetscCall(MatPreallocateSet(i + rstart, nnz, indx, dnz, onz));
4627       PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, NULL));
4628     }
4629 
4630     PetscCall(MatCreate(comm, outmat));
4631     PetscCall(MatSetSizes(*outmat, m, n, PETSC_DETERMINE, PETSC_DETERMINE));
4632     PetscCall(MatGetBlockSizes(inmat, &bs, &cbs));
4633     PetscCall(MatSetBlockSizes(*outmat, bs, cbs));
4634     PetscCall(MatGetRootType_Private(inmat, &rootType));
4635     PetscCall(MatSetType(*outmat, rootType));
4636     PetscCall(MatSeqAIJSetPreallocation(*outmat, 0, dnz));
4637     PetscCall(MatMPIAIJSetPreallocation(*outmat, 0, dnz, 0, onz));
4638     MatPreallocateEnd(dnz, onz);
4639     PetscCall(MatSetOption(*outmat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
4640   }
4641 
4642   /* numeric phase */
4643   PetscCall(MatGetOwnershipRange(*outmat, &rstart, NULL));
4644   for (i = 0; i < m; i++) {
4645     PetscCall(MatGetRow_SeqAIJ(inmat, i, &nnz, &indx, &values));
4646     Ii = i + rstart;
4647     PetscCall(MatSetValues(*outmat, 1, &Ii, nnz, indx, values, INSERT_VALUES));
4648     PetscCall(MatRestoreRow_SeqAIJ(inmat, i, &nnz, &indx, &values));
4649   }
4650   PetscCall(MatAssemblyBegin(*outmat, MAT_FINAL_ASSEMBLY));
4651   PetscCall(MatAssemblyEnd(*outmat, MAT_FINAL_ASSEMBLY));
4652   PetscFunctionReturn(PETSC_SUCCESS);
4653 }
4654 
4655 static PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(void *data)
4656 {
4657   Mat_Merge_SeqsToMPI *merge = (Mat_Merge_SeqsToMPI *)data;
4658 
4659   PetscFunctionBegin;
4660   if (!merge) PetscFunctionReturn(PETSC_SUCCESS);
4661   PetscCall(PetscFree(merge->id_r));
4662   PetscCall(PetscFree(merge->len_s));
4663   PetscCall(PetscFree(merge->len_r));
4664   PetscCall(PetscFree(merge->bi));
4665   PetscCall(PetscFree(merge->bj));
4666   PetscCall(PetscFree(merge->buf_ri[0]));
4667   PetscCall(PetscFree(merge->buf_ri));
4668   PetscCall(PetscFree(merge->buf_rj[0]));
4669   PetscCall(PetscFree(merge->buf_rj));
4670   PetscCall(PetscFree(merge->coi));
4671   PetscCall(PetscFree(merge->coj));
4672   PetscCall(PetscFree(merge->owners_co));
4673   PetscCall(PetscLayoutDestroy(&merge->rowmap));
4674   PetscCall(PetscFree(merge));
4675   PetscFunctionReturn(PETSC_SUCCESS);
4676 }
4677 
4678 #include <../src/mat/utils/freespace.h>
4679 #include <petscbt.h>
4680 
4681 PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat, Mat mpimat)
4682 {
4683   MPI_Comm             comm;
4684   Mat_SeqAIJ          *a = (Mat_SeqAIJ *)seqmat->data;
4685   PetscMPIInt          size, rank, taga, *len_s;
4686   PetscInt             N = mpimat->cmap->N, i, j, *owners, *ai = a->i, *aj;
4687   PetscInt             proc, m;
4688   PetscInt           **buf_ri, **buf_rj;
4689   PetscInt             k, anzi, *bj_i, *bi, *bj, arow, bnzi, nextaj;
4690   PetscInt             nrows, **buf_ri_k, **nextrow, **nextai;
4691   MPI_Request         *s_waits, *r_waits;
4692   MPI_Status          *status;
4693   const MatScalar     *aa, *a_a;
4694   MatScalar          **abuf_r, *ba_i;
4695   Mat_Merge_SeqsToMPI *merge;
4696   PetscContainer       container;
4697 
4698   PetscFunctionBegin;
4699   PetscCall(PetscObjectGetComm((PetscObject)mpimat, &comm));
4700   PetscCall(PetscLogEventBegin(MAT_Seqstompinum, seqmat, 0, 0, 0));
4701 
4702   PetscCallMPI(MPI_Comm_size(comm, &size));
4703   PetscCallMPI(MPI_Comm_rank(comm, &rank));
4704 
4705   PetscCall(PetscObjectQuery((PetscObject)mpimat, "MatMergeSeqsToMPI", (PetscObject *)&container));
4706   PetscCheck(container, PetscObjectComm((PetscObject)mpimat), PETSC_ERR_PLIB, "Mat not created from MatCreateMPIAIJSumSeqAIJSymbolic");
4707   PetscCall(PetscContainerGetPointer(container, (void **)&merge));
4708   PetscCall(MatSeqAIJGetArrayRead(seqmat, &a_a));
4709   aa = a_a;
4710 
4711   bi     = merge->bi;
4712   bj     = merge->bj;
4713   buf_ri = merge->buf_ri;
4714   buf_rj = merge->buf_rj;
4715 
4716   PetscCall(PetscMalloc1(size, &status));
4717   owners = merge->rowmap->range;
4718   len_s  = merge->len_s;
4719 
4720   /* send and recv matrix values */
4721   PetscCall(PetscObjectGetNewTag((PetscObject)mpimat, &taga));
4722   PetscCall(PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits));
4723 
4724   PetscCall(PetscMalloc1(merge->nsend + 1, &s_waits));
4725   for (proc = 0, k = 0; proc < size; proc++) {
4726     if (!len_s[proc]) continue;
4727     i = owners[proc];
4728     PetscCallMPI(MPI_Isend(aa + ai[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k));
4729     k++;
4730   }
4731 
4732   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, r_waits, status));
4733   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, s_waits, status));
4734   PetscCall(PetscFree(status));
4735 
4736   PetscCall(PetscFree(s_waits));
4737   PetscCall(PetscFree(r_waits));
4738 
4739   /* insert mat values of mpimat */
4740   PetscCall(PetscMalloc1(N, &ba_i));
4741   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai));
4742 
4743   for (k = 0; k < merge->nrecv; k++) {
4744     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4745     nrows       = *(buf_ri_k[k]);
4746     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
4747     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4748   }
4749 
4750   /* set values of ba */
4751   m = merge->rowmap->n;
4752   for (i = 0; i < m; i++) {
4753     arow = owners[rank] + i;
4754     bj_i = bj + bi[i]; /* col indices of the i-th row of mpimat */
4755     bnzi = bi[i + 1] - bi[i];
4756     PetscCall(PetscArrayzero(ba_i, bnzi));
4757 
4758     /* add local non-zero vals of this proc's seqmat into ba */
4759     anzi   = ai[arow + 1] - ai[arow];
4760     aj     = a->j + ai[arow];
4761     aa     = a_a + ai[arow];
4762     nextaj = 0;
4763     for (j = 0; nextaj < anzi; j++) {
4764       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4765         ba_i[j] += aa[nextaj++];
4766       }
4767     }
4768 
4769     /* add received vals into ba */
4770     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
4771       /* i-th row */
4772       if (i == *nextrow[k]) {
4773         anzi   = *(nextai[k] + 1) - *nextai[k];
4774         aj     = buf_rj[k] + *(nextai[k]);
4775         aa     = abuf_r[k] + *(nextai[k]);
4776         nextaj = 0;
4777         for (j = 0; nextaj < anzi; j++) {
4778           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4779             ba_i[j] += aa[nextaj++];
4780           }
4781         }
4782         nextrow[k]++;
4783         nextai[k]++;
4784       }
4785     }
4786     PetscCall(MatSetValues(mpimat, 1, &arow, bnzi, bj_i, ba_i, INSERT_VALUES));
4787   }
4788   PetscCall(MatSeqAIJRestoreArrayRead(seqmat, &a_a));
4789   PetscCall(MatAssemblyBegin(mpimat, MAT_FINAL_ASSEMBLY));
4790   PetscCall(MatAssemblyEnd(mpimat, MAT_FINAL_ASSEMBLY));
4791 
4792   PetscCall(PetscFree(abuf_r[0]));
4793   PetscCall(PetscFree(abuf_r));
4794   PetscCall(PetscFree(ba_i));
4795   PetscCall(PetscFree3(buf_ri_k, nextrow, nextai));
4796   PetscCall(PetscLogEventEnd(MAT_Seqstompinum, seqmat, 0, 0, 0));
4797   PetscFunctionReturn(PETSC_SUCCESS);
4798 }
4799 
4800 PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, Mat *mpimat)
4801 {
4802   Mat                  B_mpi;
4803   Mat_SeqAIJ          *a = (Mat_SeqAIJ *)seqmat->data;
4804   PetscMPIInt          size, rank, tagi, tagj, *len_s, *len_si, *len_ri;
4805   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
4806   PetscInt             M = seqmat->rmap->n, N = seqmat->cmap->n, i, *owners, *ai = a->i, *aj = a->j;
4807   PetscInt             len, proc, *dnz, *onz, bs, cbs;
4808   PetscInt             k, anzi, *bi, *bj, *lnk, nlnk, arow, bnzi;
4809   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextai;
4810   MPI_Request         *si_waits, *sj_waits, *ri_waits, *rj_waits;
4811   MPI_Status          *status;
4812   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
4813   PetscBT              lnkbt;
4814   Mat_Merge_SeqsToMPI *merge;
4815   PetscContainer       container;
4816 
4817   PetscFunctionBegin;
4818   PetscCall(PetscLogEventBegin(MAT_Seqstompisym, seqmat, 0, 0, 0));
4819 
4820   /* make sure it is a PETSc comm */
4821   PetscCall(PetscCommDuplicate(comm, &comm, NULL));
4822   PetscCallMPI(MPI_Comm_size(comm, &size));
4823   PetscCallMPI(MPI_Comm_rank(comm, &rank));
4824 
4825   PetscCall(PetscNew(&merge));
4826   PetscCall(PetscMalloc1(size, &status));
4827 
4828   /* determine row ownership */
4829   PetscCall(PetscLayoutCreate(comm, &merge->rowmap));
4830   PetscCall(PetscLayoutSetLocalSize(merge->rowmap, m));
4831   PetscCall(PetscLayoutSetSize(merge->rowmap, M));
4832   PetscCall(PetscLayoutSetBlockSize(merge->rowmap, 1));
4833   PetscCall(PetscLayoutSetUp(merge->rowmap));
4834   PetscCall(PetscMalloc1(size, &len_si));
4835   PetscCall(PetscMalloc1(size, &merge->len_s));
4836 
4837   m      = merge->rowmap->n;
4838   owners = merge->rowmap->range;
4839 
4840   /* determine the number of messages to send, their lengths */
4841   len_s = merge->len_s;
4842 
4843   len          = 0; /* length of buf_si[] */
4844   merge->nsend = 0;
4845   for (proc = 0; proc < size; proc++) {
4846     len_si[proc] = 0;
4847     if (proc == rank) {
4848       len_s[proc] = 0;
4849     } else {
4850       len_si[proc] = owners[proc + 1] - owners[proc] + 1;
4851       len_s[proc]  = ai[owners[proc + 1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4852     }
4853     if (len_s[proc]) {
4854       merge->nsend++;
4855       nrows = 0;
4856       for (i = owners[proc]; i < owners[proc + 1]; i++) {
4857         if (ai[i + 1] > ai[i]) nrows++;
4858       }
4859       len_si[proc] = 2 * (nrows + 1);
4860       len += len_si[proc];
4861     }
4862   }
4863 
4864   /* determine the number and length of messages to receive for ij-structure */
4865   PetscCall(PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv));
4866   PetscCall(PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri));
4867 
4868   /* post the Irecv of j-structure */
4869   PetscCall(PetscCommGetNewTag(comm, &tagj));
4870   PetscCall(PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rj_waits));
4871 
4872   /* post the Isend of j-structure */
4873   PetscCall(PetscMalloc2(merge->nsend, &si_waits, merge->nsend, &sj_waits));
4874 
4875   for (proc = 0, k = 0; proc < size; proc++) {
4876     if (!len_s[proc]) continue;
4877     i = owners[proc];
4878     PetscCallMPI(MPI_Isend(aj + ai[i], len_s[proc], MPIU_INT, proc, tagj, comm, sj_waits + k));
4879     k++;
4880   }
4881 
4882   /* receives and sends of j-structure are complete */
4883   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, rj_waits, status));
4884   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, sj_waits, status));
4885 
4886   /* send and recv i-structure */
4887   PetscCall(PetscCommGetNewTag(comm, &tagi));
4888   PetscCall(PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &ri_waits));
4889 
4890   PetscCall(PetscMalloc1(len + 1, &buf_s));
4891   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
4892   for (proc = 0, k = 0; proc < size; proc++) {
4893     if (!len_s[proc]) continue;
4894     /* form outgoing message for i-structure:
4895          buf_si[0]:                 nrows to be sent
4896                [1:nrows]:           row index (global)
4897                [nrows+1:2*nrows+1]: i-structure index
4898     */
4899     nrows       = len_si[proc] / 2 - 1;
4900     buf_si_i    = buf_si + nrows + 1;
4901     buf_si[0]   = nrows;
4902     buf_si_i[0] = 0;
4903     nrows       = 0;
4904     for (i = owners[proc]; i < owners[proc + 1]; i++) {
4905       anzi = ai[i + 1] - ai[i];
4906       if (anzi) {
4907         buf_si_i[nrows + 1] = buf_si_i[nrows] + anzi; /* i-structure */
4908         buf_si[nrows + 1]   = i - owners[proc];       /* local row index */
4909         nrows++;
4910       }
4911     }
4912     PetscCallMPI(MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, si_waits + k));
4913     k++;
4914     buf_si += len_si[proc];
4915   }
4916 
4917   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv, ri_waits, status));
4918   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend, si_waits, status));
4919 
4920   PetscCall(PetscInfo(seqmat, "nsend: %d, nrecv: %d\n", merge->nsend, merge->nrecv));
4921   for (i = 0; i < merge->nrecv; i++) PetscCall(PetscInfo(seqmat, "recv len_ri=%d, len_rj=%d from [%d]\n", len_ri[i], merge->len_r[i], merge->id_r[i]));
4922 
4923   PetscCall(PetscFree(len_si));
4924   PetscCall(PetscFree(len_ri));
4925   PetscCall(PetscFree(rj_waits));
4926   PetscCall(PetscFree2(si_waits, sj_waits));
4927   PetscCall(PetscFree(ri_waits));
4928   PetscCall(PetscFree(buf_s));
4929   PetscCall(PetscFree(status));
4930 
4931   /* compute a local seq matrix in each processor */
4932   /* allocate bi array and free space for accumulating nonzero column info */
4933   PetscCall(PetscMalloc1(m + 1, &bi));
4934   bi[0] = 0;
4935 
4936   /* create and initialize a linked list */
4937   nlnk = N + 1;
4938   PetscCall(PetscLLCreate(N, N, nlnk, lnk, lnkbt));
4939 
4940   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4941   len = ai[owners[rank + 1]] - ai[owners[rank]];
4942   PetscCall(PetscFreeSpaceGet(PetscIntMultTruncate(2, len) + 1, &free_space));
4943 
4944   current_space = free_space;
4945 
4946   /* determine symbolic info for each local row */
4947   PetscCall(PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextai));
4948 
4949   for (k = 0; k < merge->nrecv; k++) {
4950     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4951     nrows       = *buf_ri_k[k];
4952     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
4953     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
4954   }
4955 
4956   MatPreallocateBegin(comm, m, n, dnz, onz);
4957   len = 0;
4958   for (i = 0; i < m; i++) {
4959     bnzi = 0;
4960     /* add local non-zero cols of this proc's seqmat into lnk */
4961     arow = owners[rank] + i;
4962     anzi = ai[arow + 1] - ai[arow];
4963     aj   = a->j + ai[arow];
4964     PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt));
4965     bnzi += nlnk;
4966     /* add received col data into lnk */
4967     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
4968       if (i == *nextrow[k]) {            /* i-th row */
4969         anzi = *(nextai[k] + 1) - *nextai[k];
4970         aj   = buf_rj[k] + *nextai[k];
4971         PetscCall(PetscLLAddSorted(anzi, aj, N, &nlnk, lnk, lnkbt));
4972         bnzi += nlnk;
4973         nextrow[k]++;
4974         nextai[k]++;
4975       }
4976     }
4977     if (len < bnzi) len = bnzi; /* =max(bnzi) */
4978 
4979     /* if free space is not available, make more free space */
4980     if (current_space->local_remaining < bnzi) PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(bnzi, current_space->total_array_size), &current_space));
4981     /* copy data into free space, then initialize lnk */
4982     PetscCall(PetscLLClean(N, N, bnzi, lnk, current_space->array, lnkbt));
4983     PetscCall(MatPreallocateSet(i + owners[rank], bnzi, current_space->array, dnz, onz));
4984 
4985     current_space->array += bnzi;
4986     current_space->local_used += bnzi;
4987     current_space->local_remaining -= bnzi;
4988 
4989     bi[i + 1] = bi[i] + bnzi;
4990   }
4991 
4992   PetscCall(PetscFree3(buf_ri_k, nextrow, nextai));
4993 
4994   PetscCall(PetscMalloc1(bi[m] + 1, &bj));
4995   PetscCall(PetscFreeSpaceContiguous(&free_space, bj));
4996   PetscCall(PetscLLDestroy(lnk, lnkbt));
4997 
4998   /* create symbolic parallel matrix B_mpi */
4999   PetscCall(MatGetBlockSizes(seqmat, &bs, &cbs));
5000   PetscCall(MatCreate(comm, &B_mpi));
5001   if (n == PETSC_DECIDE) {
5002     PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, N));
5003   } else {
5004     PetscCall(MatSetSizes(B_mpi, m, n, PETSC_DETERMINE, PETSC_DETERMINE));
5005   }
5006   PetscCall(MatSetBlockSizes(B_mpi, bs, cbs));
5007   PetscCall(MatSetType(B_mpi, MATMPIAIJ));
5008   PetscCall(MatMPIAIJSetPreallocation(B_mpi, 0, dnz, 0, onz));
5009   MatPreallocateEnd(dnz, onz);
5010   PetscCall(MatSetOption(B_mpi, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
5011 
5012   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
5013   B_mpi->assembled = PETSC_FALSE;
5014   merge->bi        = bi;
5015   merge->bj        = bj;
5016   merge->buf_ri    = buf_ri;
5017   merge->buf_rj    = buf_rj;
5018   merge->coi       = NULL;
5019   merge->coj       = NULL;
5020   merge->owners_co = NULL;
5021 
5022   PetscCall(PetscCommDestroy(&comm));
5023 
5024   /* attach the supporting struct to B_mpi for reuse */
5025   PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
5026   PetscCall(PetscContainerSetPointer(container, merge));
5027   PetscCall(PetscContainerSetUserDestroy(container, MatDestroy_MPIAIJ_SeqsToMPI));
5028   PetscCall(PetscObjectCompose((PetscObject)B_mpi, "MatMergeSeqsToMPI", (PetscObject)container));
5029   PetscCall(PetscContainerDestroy(&container));
5030   *mpimat = B_mpi;
5031 
5032   PetscCall(PetscLogEventEnd(MAT_Seqstompisym, seqmat, 0, 0, 0));
5033   PetscFunctionReturn(PETSC_SUCCESS);
5034 }
5035 
5036 /*@C
5037   MatCreateMPIAIJSumSeqAIJ - Creates a `MATMPIAIJ` matrix by adding sequential
5038   matrices from each processor
5039 
5040   Collective
5041 
5042   Input Parameters:
5043 + comm   - the communicator the parallel matrix will live on
5044 . seqmat - the input sequential matrices
5045 . m      - number of local rows (or `PETSC_DECIDE`)
5046 . n      - number of local columns (or `PETSC_DECIDE`)
5047 - scall  - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5048 
5049   Output Parameter:
5050 . mpimat - the parallel matrix generated
5051 
5052   Level: advanced
5053 
5054   Note:
5055   The dimensions of the sequential matrix in each processor MUST be the same.
5056   The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
5057   destroyed when mpimat is destroyed. Call `PetscObjectQuery()` to access seqmat.
5058 
5059 .seealso: [](ch_matrices), `Mat`, `MatCreateAIJ()`
5060 @*/
5061 PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm, Mat seqmat, PetscInt m, PetscInt n, MatReuse scall, Mat *mpimat)
5062 {
5063   PetscMPIInt size;
5064 
5065   PetscFunctionBegin;
5066   PetscCallMPI(MPI_Comm_size(comm, &size));
5067   if (size == 1) {
5068     PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0));
5069     if (scall == MAT_INITIAL_MATRIX) {
5070       PetscCall(MatDuplicate(seqmat, MAT_COPY_VALUES, mpimat));
5071     } else {
5072       PetscCall(MatCopy(seqmat, *mpimat, SAME_NONZERO_PATTERN));
5073     }
5074     PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0));
5075     PetscFunctionReturn(PETSC_SUCCESS);
5076   }
5077   PetscCall(PetscLogEventBegin(MAT_Seqstompi, seqmat, 0, 0, 0));
5078   if (scall == MAT_INITIAL_MATRIX) PetscCall(MatCreateMPIAIJSumSeqAIJSymbolic(comm, seqmat, m, n, mpimat));
5079   PetscCall(MatCreateMPIAIJSumSeqAIJNumeric(seqmat, *mpimat));
5080   PetscCall(PetscLogEventEnd(MAT_Seqstompi, seqmat, 0, 0, 0));
5081   PetscFunctionReturn(PETSC_SUCCESS);
5082 }
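
/*
  A minimal usage sketch of MatCreateMPIAIJSumSeqAIJ() (illustration only; the 4x4 size and the single
  inserted entry are placeholder assumptions):

    Mat         seqmat, mpimat;
    PetscInt    row = 0, col = 0;
    PetscScalar one = 1.0;

    PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, 4, 4, 1, NULL, &seqmat));
    PetscCall(MatSetValues(seqmat, 1, &row, 1, &col, &one, ADD_VALUES));
    PetscCall(MatAssemblyBegin(seqmat, MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(seqmat, MAT_FINAL_ASSEMBLY));
    PetscCall(MatCreateMPIAIJSumSeqAIJ(PETSC_COMM_WORLD, seqmat, PETSC_DECIDE, PETSC_DECIDE, MAT_INITIAL_MATRIX, &mpimat));
    PetscCall(MatDestroy(&mpimat));  (per the Note above, seqmat is kept in the composed container and destroyed with mpimat)
*/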
5083 
5084 /*@
5085   MatAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATAIJ` matrix.
5086 
5087   Not Collective
5088 
5089   Input Parameter:
5090 . A - the matrix
5091 
5092   Output Parameter:
5093 . A_loc - the local sequential matrix generated
5094 
5095   Level: developer
5096 
5097   Notes:
5098   The matrix is created by taking `A`'s local rows and putting them into a sequential matrix
5099   with `mlocal` rows and `n` columns, where `mlocal` is the local row count obtained with `MatGetLocalSize()` and
5100   `n` is the global column count obtained with `MatGetSize()`.
5101 
5102   In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix.
5103 
5104   For parallel matrices this creates an entirely new matrix. If the matrix is sequential it merely increases the reference count.
5105 
5106   Destroy the matrix with `MatDestroy()`
5107 
5108 .seealso: [](ch_matrices), `Mat`, `MatMPIAIJGetLocalMat()`
5109 @*/
5110 PetscErrorCode MatAIJGetLocalMat(Mat A, Mat *A_loc)
5111 {
5112   PetscBool mpi;
5113 
5114   PetscFunctionBegin;
5115   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &mpi));
5116   if (mpi) {
5117     PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, A_loc));
5118   } else {
5119     *A_loc = A;
5120     PetscCall(PetscObjectReference((PetscObject)*A_loc));
5121   }
5122   PetscFunctionReturn(PETSC_SUCCESS);
5123 }
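
/*
  A minimal usage sketch of MatAIJGetLocalMat() (illustration only): it works for both sequential and
  parallel AIJ matrices, and the result must be destroyed by the caller.

    Mat A_loc;

    PetscCall(MatAIJGetLocalMat(A, &A_loc));
    ... inspect A_loc, e.g. with MatGetRow()/MatRestoreRow() ...
    PetscCall(MatDestroy(&A_loc));
*/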
5124 
5125 /*@
5126   MatMPIAIJGetLocalMat - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix.
5127 
5128   Not Collective
5129 
5130   Input Parameters:
5131 + A     - the matrix
5132 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5133 
5134   Output Parameter:
5135 . A_loc - the local sequential matrix generated
5136 
5137   Level: developer
5138 
5139   Notes:
5140   The matrix is created by taking all `A`'s local rows and putting them into a sequential
5141   matrix with `mlocal` rows and `n` columns, where `mlocal` is the local row count obtained with
5142   `MatGetLocalSize()` and `n` is the global column count obtained with `MatGetSize()`.
5143 
5144   In other words, it combines the two parts of a parallel `MATMPIAIJ` matrix on each process into a single matrix.
5145 
5146   When `A` is sequential and `MAT_INITIAL_MATRIX` is requested, the matrix returned is the diagonal part of `A` (which contains the entire matrix),
5147   with its reference count increased by one. Hence changing values of `A_loc` changes `A`. If `MAT_REUSE_MATRIX` is requested on a sequential matrix
5148   then `MatCopy`(Adiag,*`A_loc`,`SAME_NONZERO_PATTERN`) is called to fill `A_loc`. Thus one can preallocate the appropriate sequential matrix `A_loc`
5149   and then call this routine with `MAT_REUSE_MATRIX`. In this case, one can modify the values of `A_loc` without affecting the original sequential matrix.
5150 
5151 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMatCondensed()`, `MatMPIAIJGetLocalMatMerge()`
5152 @*/
5153 PetscErrorCode MatMPIAIJGetLocalMat(Mat A, MatReuse scall, Mat *A_loc)
5154 {
5155   Mat_MPIAIJ        *mpimat = (Mat_MPIAIJ *)A->data;
5156   Mat_SeqAIJ        *mat, *a, *b;
5157   PetscInt          *ai, *aj, *bi, *bj, *cmap = mpimat->garray;
5158   const PetscScalar *aa, *ba, *aav, *bav;
5159   PetscScalar       *ca, *cam;
5160   PetscMPIInt        size;
5161   PetscInt           am = A->rmap->n, i, j, k, cstart = A->cmap->rstart;
5162   PetscInt          *ci, *cj, col, ncols_d, ncols_o, jo;
5163   PetscBool          match;
5164 
5165   PetscFunctionBegin;
5166   PetscCall(PetscStrbeginswith(((PetscObject)A)->type_name, MATMPIAIJ, &match));
5167   PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input");
5168   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
5169   if (size == 1) {
5170     if (scall == MAT_INITIAL_MATRIX) {
5171       PetscCall(PetscObjectReference((PetscObject)mpimat->A));
5172       *A_loc = mpimat->A;
5173     } else if (scall == MAT_REUSE_MATRIX) {
5174       PetscCall(MatCopy(mpimat->A, *A_loc, SAME_NONZERO_PATTERN));
5175     }
5176     PetscFunctionReturn(PETSC_SUCCESS);
5177   }
5178 
5179   PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0));
5180   a  = (Mat_SeqAIJ *)(mpimat->A)->data;
5181   b  = (Mat_SeqAIJ *)(mpimat->B)->data;
5182   ai = a->i;
5183   aj = a->j;
5184   bi = b->i;
5185   bj = b->j;
5186   PetscCall(MatSeqAIJGetArrayRead(mpimat->A, &aav));
5187   PetscCall(MatSeqAIJGetArrayRead(mpimat->B, &bav));
5188   aa = aav;
5189   ba = bav;
5190   if (scall == MAT_INITIAL_MATRIX) {
5191     PetscCall(PetscMalloc1(1 + am, &ci));
5192     ci[0] = 0;
5193     for (i = 0; i < am; i++) ci[i + 1] = ci[i] + (ai[i + 1] - ai[i]) + (bi[i + 1] - bi[i]);
5194     PetscCall(PetscMalloc1(1 + ci[am], &cj));
5195     PetscCall(PetscMalloc1(1 + ci[am], &ca));
5196     k = 0;
5197     for (i = 0; i < am; i++) {
5198       ncols_o = bi[i + 1] - bi[i];
5199       ncols_d = ai[i + 1] - ai[i];
5200       /* off-diagonal portion of A */
5201       for (jo = 0; jo < ncols_o; jo++) {
5202         col = cmap[*bj];
5203         if (col >= cstart) break;
5204         cj[k] = col;
5205         bj++;
5206         ca[k++] = *ba++;
5207       }
5208       /* diagonal portion of A */
5209       for (j = 0; j < ncols_d; j++) {
5210         cj[k]   = cstart + *aj++;
5211         ca[k++] = *aa++;
5212       }
5213       /* off-diagonal portion of A */
5214       for (j = jo; j < ncols_o; j++) {
5215         cj[k]   = cmap[*bj++];
5216         ca[k++] = *ba++;
5217       }
5218     }
5219     /* put together the new matrix */
5220     PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, A->cmap->N, ci, cj, ca, A_loc));
5221     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5222     /* Since these are PETSc arrays, change flags to free them as necessary. */
5223     mat          = (Mat_SeqAIJ *)(*A_loc)->data;
5224     mat->free_a  = PETSC_TRUE;
5225     mat->free_ij = PETSC_TRUE;
5226     mat->nonew   = 0;
5227   } else if (scall == MAT_REUSE_MATRIX) {
5228     mat = (Mat_SeqAIJ *)(*A_loc)->data;
5229     ci  = mat->i;
5230     cj  = mat->j;
5231     PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &cam));
5232     for (i = 0; i < am; i++) {
5233       /* off-diagonal portion of A */
5234       ncols_o = bi[i + 1] - bi[i];
5235       for (jo = 0; jo < ncols_o; jo++) {
5236         col = cmap[*bj];
5237         if (col >= cstart) break;
5238         *cam++ = *ba++;
5239         bj++;
5240       }
5241       /* diagonal portion of A */
5242       ncols_d = ai[i + 1] - ai[i];
5243       for (j = 0; j < ncols_d; j++) *cam++ = *aa++;
5244       /* off-diagonal portion of A */
5245       for (j = jo; j < ncols_o; j++) {
5246         *cam++ = *ba++;
5247         bj++;
5248       }
5249     }
5250     PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &cam));
5251   } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall);
5252   PetscCall(MatSeqAIJRestoreArrayRead(mpimat->A, &aav));
5253   PetscCall(MatSeqAIJRestoreArrayRead(mpimat->B, &bav));
5254   PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0));
5255   PetscFunctionReturn(PETSC_SUCCESS);
5256 }
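
/*
  A minimal usage sketch of MatMPIAIJGetLocalMat() (illustration only; the surrounding loop and the way
  A is updated are assumptions): build the local matrix once, then refresh its values with
  MAT_REUSE_MATRIX whenever A changes while keeping the same nonzero pattern.

    Mat A_loc = NULL;

    PetscCall(MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc));
    while (...) {
      ... update the values of A (same nonzero pattern) ...
      PetscCall(MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc));
    }
    PetscCall(MatDestroy(&A_loc));
*/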
5257 
5258 /*@
5259   MatMPIAIJGetLocalMatMerge - Creates a `MATSEQAIJ` from a `MATMPIAIJ` matrix by taking all its local rows and putting them into a sequential matrix with
5260   mlocal rows and n columns, where n is the sum of the number of columns of the diagonal and off-diagonal parts
5261 
5262   Not Collective
5263 
5264   Input Parameters:
5265 + A     - the matrix
5266 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5267 
5268   Output Parameters:
5269 + glob  - sequential `IS` with global indices associated with the columns of the local sequential matrix generated (can be `NULL`)
5270 - A_loc - the local sequential matrix generated
5271 
5272   Level: developer
5273 
5274   Note:
5275   This is different from `MatMPIAIJGetLocalMat()` since the first columns in the returned matrix are those associated with the diagonal
5276   part, followed by those associated with the off-diagonal part (in its local ordering)
5277 
5278 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()`, `MatMPIAIJGetLocalMatCondensed()`
5279 @*/
5280 PetscErrorCode MatMPIAIJGetLocalMatMerge(Mat A, MatReuse scall, IS *glob, Mat *A_loc)
5281 {
5282   Mat             Ao, Ad;
5283   const PetscInt *cmap;
5284   PetscMPIInt     size;
5285   PetscErrorCode (*f)(Mat, MatReuse, IS *, Mat *);
5286 
5287   PetscFunctionBegin;
5288   PetscCall(MatMPIAIJGetSeqAIJ(A, &Ad, &Ao, &cmap));
5289   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
5290   if (size == 1) {
5291     if (scall == MAT_INITIAL_MATRIX) {
5292       PetscCall(PetscObjectReference((PetscObject)Ad));
5293       *A_loc = Ad;
5294     } else if (scall == MAT_REUSE_MATRIX) {
5295       PetscCall(MatCopy(Ad, *A_loc, SAME_NONZERO_PATTERN));
5296     }
5297     if (glob) PetscCall(ISCreateStride(PetscObjectComm((PetscObject)Ad), Ad->cmap->n, Ad->cmap->rstart, 1, glob));
5298     PetscFunctionReturn(PETSC_SUCCESS);
5299   }
5300   PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatMPIAIJGetLocalMatMerge_C", &f));
5301   PetscCall(PetscLogEventBegin(MAT_Getlocalmat, A, 0, 0, 0));
5302   if (f) {
5303     PetscCall((*f)(A, scall, glob, A_loc));
5304   } else {
5305     Mat_SeqAIJ        *a = (Mat_SeqAIJ *)Ad->data;
5306     Mat_SeqAIJ        *b = (Mat_SeqAIJ *)Ao->data;
5307     Mat_SeqAIJ        *c;
5308     PetscInt          *ai = a->i, *aj = a->j;
5309     PetscInt          *bi = b->i, *bj = b->j;
5310     PetscInt          *ci, *cj;
5311     const PetscScalar *aa, *ba;
5312     PetscScalar       *ca;
5313     PetscInt           i, j, am, dn, on;
5314 
5315     PetscCall(MatGetLocalSize(Ad, &am, &dn));
5316     PetscCall(MatGetLocalSize(Ao, NULL, &on));
5317     PetscCall(MatSeqAIJGetArrayRead(Ad, &aa));
5318     PetscCall(MatSeqAIJGetArrayRead(Ao, &ba));
5319     if (scall == MAT_INITIAL_MATRIX) {
5320       PetscInt k;
5321       PetscCall(PetscMalloc1(1 + am, &ci));
5322       PetscCall(PetscMalloc1(ai[am] + bi[am], &cj));
5323       PetscCall(PetscMalloc1(ai[am] + bi[am], &ca));
5324       ci[0] = 0;
5325       for (i = 0, k = 0; i < am; i++) {
5326         const PetscInt ncols_o = bi[i + 1] - bi[i];
5327         const PetscInt ncols_d = ai[i + 1] - ai[i];
5328         ci[i + 1]              = ci[i] + ncols_o + ncols_d;
5329         /* diagonal portion of A */
5330         for (j = 0; j < ncols_d; j++, k++) {
5331           cj[k] = *aj++;
5332           ca[k] = *aa++;
5333         }
5334         /* off-diagonal portion of A */
5335         for (j = 0; j < ncols_o; j++, k++) {
5336           cj[k] = dn + *bj++;
5337           ca[k] = *ba++;
5338         }
5339       }
5340       /* put together the new matrix */
5341       PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, am, dn + on, ci, cj, ca, A_loc));
5342       /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5343       /* Since these are PETSc arrays, change flags to free them as necessary. */
5344       c          = (Mat_SeqAIJ *)(*A_loc)->data;
5345       c->free_a  = PETSC_TRUE;
5346       c->free_ij = PETSC_TRUE;
5347       c->nonew   = 0;
5348       PetscCall(MatSetType(*A_loc, ((PetscObject)Ad)->type_name));
5349     } else if (scall == MAT_REUSE_MATRIX) {
5350       PetscCall(MatSeqAIJGetArrayWrite(*A_loc, &ca));
5351       for (i = 0; i < am; i++) {
5352         const PetscInt ncols_d = ai[i + 1] - ai[i];
5353         const PetscInt ncols_o = bi[i + 1] - bi[i];
5354         /* diagonal portion of A */
5355         for (j = 0; j < ncols_d; j++) *ca++ = *aa++;
5356         /* off-diagonal portion of A */
5357         for (j = 0; j < ncols_o; j++) *ca++ = *ba++;
5358       }
5359       PetscCall(MatSeqAIJRestoreArrayWrite(*A_loc, &ca));
5360     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Invalid MatReuse %d", (int)scall);
5361     PetscCall(MatSeqAIJRestoreArrayRead(Ad, &aa));
5362     PetscCall(MatSeqAIJRestoreArrayRead(Ao, &ba));
5363     if (glob) {
5364       PetscInt cst, *gidx;
5365 
5366       PetscCall(MatGetOwnershipRangeColumn(A, &cst, NULL));
5367       PetscCall(PetscMalloc1(dn + on, &gidx));
5368       for (i = 0; i < dn; i++) gidx[i] = cst + i;
5369       for (i = 0; i < on; i++) gidx[i + dn] = cmap[i];
5370       PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)Ad), dn + on, gidx, PETSC_OWN_POINTER, glob));
5371     }
5372   }
5373   PetscCall(PetscLogEventEnd(MAT_Getlocalmat, A, 0, 0, 0));
5374   PetscFunctionReturn(PETSC_SUCCESS);
5375 }
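
/*
  A minimal usage sketch of MatMPIAIJGetLocalMatMerge() (illustration only): the returned IS maps the
  local column ordering (diagonal columns first, then off-diagonal columns) to global column indices.

    Mat             A_loc;
    IS              glob;
    const PetscInt *gcols;

    PetscCall(MatMPIAIJGetLocalMatMerge(A, MAT_INITIAL_MATRIX, &glob, &A_loc));
    PetscCall(ISGetIndices(glob, &gcols));
    ... gcols[j] is the global column index of local column j of A_loc ...
    PetscCall(ISRestoreIndices(glob, &gcols));
    PetscCall(ISDestroy(&glob));
    PetscCall(MatDestroy(&A_loc));
*/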
5376 
5377 /*@C
5378   MatMPIAIJGetLocalMatCondensed - Creates a `MATSEQAIJ` matrix from a `MATMPIAIJ` matrix by taking all its local rows and NON-ZERO columns
5379 
5380   Not Collective
5381 
5382   Input Parameters:
5383 + A     - the matrix
5384 . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5385 . row   - index set of rows to extract (or `NULL`)
5386 - col   - index set of columns to extract (or `NULL`)
5387 
5388   Output Parameter:
5389 . A_loc - the local sequential matrix generated
5390 
5391   Level: developer
5392 
5393 .seealso: [](ch_matrices), `Mat`, `MATMPIAIJ`, `MatGetOwnershipRange()`, `MatMPIAIJGetLocalMat()`
5394 @*/
5395 PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
5396 {
5397   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
5398   PetscInt    i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
5399   IS          isrowa, iscola;
5400   Mat        *aloc;
5401   PetscBool   match;
5402 
5403   PetscFunctionBegin;
5404   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPIAIJ, &match));
5405   PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPIAIJ matrix as input");
5406   PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
5407   if (!row) {
5408     start = A->rmap->rstart;
5409     end   = A->rmap->rend;
5410     PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
5411   } else {
5412     isrowa = *row;
5413   }
5414   if (!col) {
5415     start = A->cmap->rstart;
5416     cmap  = a->garray;
5417     nzA   = a->A->cmap->n;
5418     nzB   = a->B->cmap->n;
5419     PetscCall(PetscMalloc1(nzA + nzB, &idx));
5420     ncols = 0;
5421     for (i = 0; i < nzB; i++) {
5422       if (cmap[i] < start) idx[ncols++] = cmap[i];
5423       else break;
5424     }
5425     imark = i;
5426     for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
5427     for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
5428     PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
5429   } else {
5430     iscola = *col;
5431   }
5432   if (scall != MAT_INITIAL_MATRIX) {
5433     PetscCall(PetscMalloc1(1, &aloc));
5434     aloc[0] = *A_loc;
5435   }
5436   PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
5437   if (!col) { /* attach global id of condensed columns */
5438     PetscCall(PetscObjectCompose((PetscObject)aloc[0], "_petsc_GetLocalMatCondensed_iscol", (PetscObject)iscola));
5439   }
5440   *A_loc = aloc[0];
5441   PetscCall(PetscFree(aloc));
5442   if (!row) PetscCall(ISDestroy(&isrowa));
5443   if (!col) PetscCall(ISDestroy(&iscola));
5444   PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
5445   PetscFunctionReturn(PETSC_SUCCESS);
5446 }
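
/*
  A minimal usage sketch of MatMPIAIJGetLocalMatCondensed() (illustration only): with row = col = NULL
  all local rows are kept, only the nonzero columns are extracted, and the global ids of those columns
  are composed on the result under "_petsc_GetLocalMatCondensed_iscol" (see above).

    Mat A_loc;

    PetscCall(MatMPIAIJGetLocalMatCondensed(A, MAT_INITIAL_MATRIX, NULL, NULL, &A_loc));
    ... use A_loc ...
    PetscCall(MatDestroy(&A_loc));
*/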
5447 
5448 /*
5449  * Create a sequential AIJ matrix based on row indices: when a row index matches, the whole row (all of its columns) is extracted.
5450  * Rows could be local or remote. The routine is designed to be scalable in memory so that nothing is based
5451  * on a global size.
5452  * */
5453 static PetscErrorCode MatCreateSeqSubMatrixWithRows_Private(Mat P, IS rows, Mat *P_oth)
5454 {
5455   Mat_MPIAIJ            *p  = (Mat_MPIAIJ *)P->data;
5456   Mat_SeqAIJ            *pd = (Mat_SeqAIJ *)(p->A)->data, *po = (Mat_SeqAIJ *)(p->B)->data, *p_oth;
5457   PetscInt               plocalsize, nrows, *ilocal, *oilocal, i, lidx, *nrcols, *nlcols, ncol;
5458   PetscMPIInt            owner;
5459   PetscSFNode           *iremote, *oiremote;
5460   const PetscInt        *lrowindices;
5461   PetscSF                sf, osf;
5462   PetscInt               pcstart, *roffsets, *loffsets, *pnnz, j;
5463   PetscInt               ontotalcols, dntotalcols, ntotalcols, nout;
5464   MPI_Comm               comm;
5465   ISLocalToGlobalMapping mapping;
5466   const PetscScalar     *pd_a, *po_a;
5467 
5468   PetscFunctionBegin;
5469   PetscCall(PetscObjectGetComm((PetscObject)P, &comm));
5470   /* plocalsize is the number of roots
5471    * nrows is the number of leaves
5472    * */
5473   PetscCall(MatGetLocalSize(P, &plocalsize, NULL));
5474   PetscCall(ISGetLocalSize(rows, &nrows));
5475   PetscCall(PetscCalloc1(nrows, &iremote));
5476   PetscCall(ISGetIndices(rows, &lrowindices));
5477   for (i = 0; i < nrows; i++) {
5478     /* Find a remote index and an owner for a row
5479      * The row could be local or remote
5480      * */
5481     owner = 0;
5482     lidx  = 0;
5483     PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, &lidx));
5484     iremote[i].index = lidx;
5485     iremote[i].rank  = owner;
5486   }
5487   /* Create SF to communicate how many nonzero columns for each row */
5488   PetscCall(PetscSFCreate(comm, &sf));
5489   /* SF will figure out the number of nonzero columns for each row, and their
5490    * offsets
5491    * */
5492   PetscCall(PetscSFSetGraph(sf, plocalsize, nrows, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
5493   PetscCall(PetscSFSetFromOptions(sf));
5494   PetscCall(PetscSFSetUp(sf));
5495 
5496   PetscCall(PetscCalloc1(2 * (plocalsize + 1), &roffsets));
5497   PetscCall(PetscCalloc1(2 * plocalsize, &nrcols));
5498   PetscCall(PetscCalloc1(nrows, &pnnz));
5499   roffsets[0] = 0;
5500   roffsets[1] = 0;
5501   for (i = 0; i < plocalsize; i++) {
5502     /* diagonal */
5503     nrcols[i * 2 + 0] = pd->i[i + 1] - pd->i[i];
5504     /* off-diagonal */
5505     nrcols[i * 2 + 1] = po->i[i + 1] - po->i[i];
5506     /* compute offsets so that we know the relative location of each row's data */
5507     roffsets[(i + 1) * 2 + 0] = roffsets[i * 2 + 0] + nrcols[i * 2 + 0];
5508     roffsets[(i + 1) * 2 + 1] = roffsets[i * 2 + 1] + nrcols[i * 2 + 1];
5509   }
5510   PetscCall(PetscCalloc1(2 * nrows, &nlcols));
5511   PetscCall(PetscCalloc1(2 * nrows, &loffsets));
5512   /* 'r' means root, and 'l' means leaf */
5513   PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE));
5514   PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE));
5515   PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, nrcols, nlcols, MPI_REPLACE));
5516   PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, roffsets, loffsets, MPI_REPLACE));
5517   PetscCall(PetscSFDestroy(&sf));
5518   PetscCall(PetscFree(roffsets));
5519   PetscCall(PetscFree(nrcols));
5520   dntotalcols = 0;
5521   ontotalcols = 0;
5522   ncol        = 0;
5523   for (i = 0; i < nrows; i++) {
5524     pnnz[i] = nlcols[i * 2 + 0] + nlcols[i * 2 + 1];
5525     ncol    = PetscMax(pnnz[i], ncol);
5526     /* diagonal */
5527     dntotalcols += nlcols[i * 2 + 0];
5528     /* off-diagonal */
5529     ontotalcols += nlcols[i * 2 + 1];
5530   }
5531   /* We do not need to figure out the exact number of columns
5532    * since all the calculations will be done by going through the raw data
5533    * */
5534   PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, nrows, ncol, 0, pnnz, P_oth));
5535   PetscCall(MatSetUp(*P_oth));
5536   PetscCall(PetscFree(pnnz));
5537   p_oth = (Mat_SeqAIJ *)(*P_oth)->data;
5538   /* diagonal */
5539   PetscCall(PetscCalloc1(dntotalcols, &iremote));
5540   /* off-diagonal */
5541   PetscCall(PetscCalloc1(ontotalcols, &oiremote));
5542   /* diagonal */
5543   PetscCall(PetscCalloc1(dntotalcols, &ilocal));
5544   /* off-diagonal */
5545   PetscCall(PetscCalloc1(ontotalcols, &oilocal));
5546   dntotalcols = 0;
5547   ontotalcols = 0;
5548   ntotalcols  = 0;
5549   for (i = 0; i < nrows; i++) {
5550     owner = 0;
5551     PetscCall(PetscLayoutFindOwnerIndex(P->rmap, lrowindices[i], &owner, NULL));
5552     /* Set iremote for diag matrix */
5553     for (j = 0; j < nlcols[i * 2 + 0]; j++) {
5554       iremote[dntotalcols].index = loffsets[i * 2 + 0] + j;
5555       iremote[dntotalcols].rank  = owner;
5556       /* P_oth is SeqAIJ, so ilocal needs to point into its single contiguous array */
5557       ilocal[dntotalcols++] = ntotalcols++;
5558     }
5559     /* off-diagonal */
5560     for (j = 0; j < nlcols[i * 2 + 1]; j++) {
5561       oiremote[ontotalcols].index = loffsets[i * 2 + 1] + j;
5562       oiremote[ontotalcols].rank  = owner;
5563       oilocal[ontotalcols++]      = ntotalcols++;
5564     }
5565   }
5566   PetscCall(ISRestoreIndices(rows, &lrowindices));
5567   PetscCall(PetscFree(loffsets));
5568   PetscCall(PetscFree(nlcols));
5569   PetscCall(PetscSFCreate(comm, &sf));
5570   /* P serves as the roots and P_oth serves as the leaves
5571    * This SF handles the diagonal part of P
5572    * */
5573   PetscCall(PetscSFSetGraph(sf, pd->i[plocalsize], dntotalcols, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
5574   PetscCall(PetscSFSetFromOptions(sf));
5575   PetscCall(PetscSFSetUp(sf));
5576 
5577   PetscCall(PetscSFCreate(comm, &osf));
5578   /* off-diagonal */
5579   PetscCall(PetscSFSetGraph(osf, po->i[plocalsize], ontotalcols, oilocal, PETSC_OWN_POINTER, oiremote, PETSC_OWN_POINTER));
5580   PetscCall(PetscSFSetFromOptions(osf));
5581   PetscCall(PetscSFSetUp(osf));
5582   PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a));
5583   PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a));
5584   /* operate on the matrix internal data to save memory */
5585   PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5586   PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5587   PetscCall(MatGetOwnershipRangeColumn(P, &pcstart, NULL));
5588   /* Convert to global indices for diag matrix */
5589   for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] += pcstart;
5590   PetscCall(PetscSFBcastBegin(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE));
5591   /* We want P_oth to store global indices */
5592   PetscCall(ISLocalToGlobalMappingCreate(comm, 1, p->B->cmap->n, p->garray, PETSC_COPY_VALUES, &mapping));
5593   /* Use memory scalable approach */
5594   PetscCall(ISLocalToGlobalMappingSetType(mapping, ISLOCALTOGLOBALMAPPINGHASH));
5595   PetscCall(ISLocalToGlobalMappingApply(mapping, po->i[plocalsize], po->j, po->j));
5596   PetscCall(PetscSFBcastBegin(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE));
5597   PetscCall(PetscSFBcastEnd(sf, MPIU_INT, pd->j, p_oth->j, MPI_REPLACE));
5598   /* Convert back to local indices */
5599   for (i = 0; i < pd->i[plocalsize]; i++) pd->j[i] -= pcstart;
5600   PetscCall(PetscSFBcastEnd(osf, MPIU_INT, po->j, p_oth->j, MPI_REPLACE));
5601   nout = 0;
5602   PetscCall(ISGlobalToLocalMappingApply(mapping, IS_GTOLM_DROP, po->i[plocalsize], po->j, &nout, po->j));
5603   PetscCheck(nout == po->i[plocalsize], comm, PETSC_ERR_ARG_INCOMP, "n %" PetscInt_FMT " does not equal to nout %" PetscInt_FMT " ", po->i[plocalsize], nout);
5604   PetscCall(ISLocalToGlobalMappingDestroy(&mapping));
5605   /* Exchange values */
5606   PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5607   PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5608   PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a));
5609   PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a));
5610   /* Stop PETSc from shrinking memory */
5611   for (i = 0; i < nrows; i++) p_oth->ilen[i] = p_oth->imax[i];
5612   PetscCall(MatAssemblyBegin(*P_oth, MAT_FINAL_ASSEMBLY));
5613   PetscCall(MatAssemblyEnd(*P_oth, MAT_FINAL_ASSEMBLY));
5614   /* Attach the PetscSF objects to P_oth so that we can reuse them later */
5615   PetscCall(PetscObjectCompose((PetscObject)*P_oth, "diagsf", (PetscObject)sf));
5616   PetscCall(PetscObjectCompose((PetscObject)*P_oth, "offdiagsf", (PetscObject)osf));
5617   PetscCall(PetscSFDestroy(&sf));
5618   PetscCall(PetscSFDestroy(&osf));
5619   PetscFunctionReturn(PETSC_SUCCESS);
5620 }
5621 
5622 /*
5623  * Creates a SeqAIJ matrix by taking the rows of B that correspond to nonzero columns of local A.
5624  * This supports MPIAIJ and MAIJ.
5625  * */
5626 PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat A, Mat P, PetscInt dof, MatReuse reuse, Mat *P_oth)
5627 {
5628   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data;
5629   Mat_SeqAIJ *p_oth;
5630   IS          rows, map;
5631   PetscHMapI  hamp;
5632   PetscInt    i, htsize, *rowindices, off, *mapping, key, count;
5633   MPI_Comm    comm;
5634   PetscSF     sf, osf;
5635   PetscBool   has;
5636 
5637   PetscFunctionBegin;
5638   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
5639   PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, P, 0, 0));
5640   /* If it is the first time, create an index set of off-diag nonzero columns of A,
5641    *  and then create a submatrix (that often is an overlapping matrix)
5642    * */
5643   if (reuse == MAT_INITIAL_MATRIX) {
5644     /* Use a hash table to figure out unique keys */
5645     PetscCall(PetscHMapICreateWithSize(a->B->cmap->n, &hamp));
5646     PetscCall(PetscCalloc1(a->B->cmap->n, &mapping));
5647     count = 0;
5648     /* Assume that a->garray is sorted; otherwise the following does not make sense */
5649     for (i = 0; i < a->B->cmap->n; i++) {
5650       key = a->garray[i] / dof;
5651       PetscCall(PetscHMapIHas(hamp, key, &has));
5652       if (!has) {
5653         mapping[i] = count;
5654         PetscCall(PetscHMapISet(hamp, key, count++));
5655       } else {
5656         /* The current 'i' has the same key as the previous one */
5657         mapping[i] = count - 1;
5658       }
5659     }
5660     PetscCall(ISCreateGeneral(comm, a->B->cmap->n, mapping, PETSC_OWN_POINTER, &map));
5661     PetscCall(PetscHMapIGetSize(hamp, &htsize));
5662     PetscCheck(htsize == count, comm, PETSC_ERR_ARG_INCOMP, " Size of hash map %" PetscInt_FMT " is inconsistent with count %" PetscInt_FMT, htsize, count);
5663     PetscCall(PetscCalloc1(htsize, &rowindices));
5664     off = 0;
5665     PetscCall(PetscHMapIGetKeys(hamp, &off, rowindices));
5666     PetscCall(PetscHMapIDestroy(&hamp));
5667     PetscCall(PetscSortInt(htsize, rowindices));
5668     PetscCall(ISCreateGeneral(comm, htsize, rowindices, PETSC_OWN_POINTER, &rows));
5669     /* In case the matrix was already created but the user wants to recreate it */
5670     PetscCall(MatDestroy(P_oth));
5671     PetscCall(MatCreateSeqSubMatrixWithRows_Private(P, rows, P_oth));
5672     PetscCall(PetscObjectCompose((PetscObject)*P_oth, "aoffdiagtopothmapping", (PetscObject)map));
5673     PetscCall(ISDestroy(&map));
5674     PetscCall(ISDestroy(&rows));
5675   } else if (reuse == MAT_REUSE_MATRIX) {
5676     /* If matrix was already created, we simply update values using SF objects
5677      * that were attached to the matrix earlier.
5678      */
5679     const PetscScalar *pd_a, *po_a;
5680 
5681     PetscCall(PetscObjectQuery((PetscObject)*P_oth, "diagsf", (PetscObject *)&sf));
5682     PetscCall(PetscObjectQuery((PetscObject)*P_oth, "offdiagsf", (PetscObject *)&osf));
5683     PetscCheck(sf && osf, comm, PETSC_ERR_ARG_NULL, "Matrix is not initialized yet");
5684     p_oth = (Mat_SeqAIJ *)(*P_oth)->data;
5685     /* Update values in place */
5686     PetscCall(MatSeqAIJGetArrayRead(p->A, &pd_a));
5687     PetscCall(MatSeqAIJGetArrayRead(p->B, &po_a));
5688     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5689     PetscCall(PetscSFBcastBegin(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5690     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, pd_a, p_oth->a, MPI_REPLACE));
5691     PetscCall(PetscSFBcastEnd(osf, MPIU_SCALAR, po_a, p_oth->a, MPI_REPLACE));
5692     PetscCall(MatSeqAIJRestoreArrayRead(p->A, &pd_a));
5693     PetscCall(MatSeqAIJRestoreArrayRead(p->B, &po_a));
5694   } else SETERRQ(comm, PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown reuse type");
5695   PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, P, 0, 0));
5696   PetscFunctionReturn(PETSC_SUCCESS);
5697 }
5698 
5699 /*@C
5700   MatGetBrowsOfAcols - Returns `IS` objects that contain the rows of `B` corresponding to nonzero columns of local `A`
5701 
5702   Collective
5703 
5704   Input Parameters:
5705 + A     - the first matrix in `MATMPIAIJ` format
5706 . B     - the second matrix in `MATMPIAIJ` format
5707 - scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5708 
5709   Output Parameters:
5710 + rowb  - On input, index set of rows of B to extract (or `NULL`); modified on output
5711 . colb  - On input, index set of columns of B to extract (or `NULL`); modified on output
5712 - B_seq - the sequential matrix generated
5713 
5714   Level: developer
5715 
5716 .seealso: `Mat`, `MATMPIAIJ`, `IS`, `MatReuse`
5717 @*/
5718 PetscErrorCode MatGetBrowsOfAcols(Mat A, Mat B, MatReuse scall, IS *rowb, IS *colb, Mat *B_seq)
5719 {
5720   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
5721   PetscInt   *idx, i, start, ncols, nzA, nzB, *cmap, imark;
5722   IS          isrowb, iscolb;
5723   Mat        *bseq = NULL;
5724 
5725   PetscFunctionBegin;
5726   PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
5727              A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
5728   PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAcols, A, B, 0, 0));
5729 
5730   if (scall == MAT_INITIAL_MATRIX) {
5731     start = A->cmap->rstart;
5732     cmap  = a->garray;
5733     nzA   = a->A->cmap->n;
5734     nzB   = a->B->cmap->n;
5735     PetscCall(PetscMalloc1(nzA + nzB, &idx));
5736     ncols = 0;
5737     for (i = 0; i < nzB; i++) { /* row < local row index */
5738       if (cmap[i] < start) idx[ncols++] = cmap[i];
5739       else break;
5740     }
5741     imark = i;
5742     for (i = 0; i < nzA; i++) idx[ncols++] = start + i;   /* local rows */
5743     for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
5744     PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &isrowb));
5745     PetscCall(ISCreateStride(PETSC_COMM_SELF, B->cmap->N, 0, 1, &iscolb));
5746   } else {
5747     PetscCheck(rowb && colb, PETSC_COMM_SELF, PETSC_ERR_SUP, "IS rowb and colb must be provided for MAT_REUSE_MATRIX");
5748     isrowb = *rowb;
5749     iscolb = *colb;
5750     PetscCall(PetscMalloc1(1, &bseq));
5751     bseq[0] = *B_seq;
5752   }
5753   PetscCall(MatCreateSubMatrices(B, 1, &isrowb, &iscolb, scall, &bseq));
5754   *B_seq = bseq[0];
5755   PetscCall(PetscFree(bseq));
5756   if (!rowb) {
5757     PetscCall(ISDestroy(&isrowb));
5758   } else {
5759     *rowb = isrowb;
5760   }
5761   if (!colb) {
5762     PetscCall(ISDestroy(&iscolb));
5763   } else {
5764     *colb = iscolb;
5765   }
5766   PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAcols, A, B, 0, 0));
5767   PetscFunctionReturn(PETSC_SUCCESS);
5768 }
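
/*
  A minimal usage sketch of MatGetBrowsOfAcols() (illustration only; how A and B are obtained is not
  shown): the index sets created on the first call can be passed back with MAT_REUSE_MATRIX, and the
  caller destroys them together with B_seq.

    IS  rowb = NULL, colb = NULL;
    Mat B_seq = NULL;

    PetscCall(MatGetBrowsOfAcols(A, B, MAT_INITIAL_MATRIX, &rowb, &colb, &B_seq));
    ... use B_seq; later, after the values of B change ...
    PetscCall(MatGetBrowsOfAcols(A, B, MAT_REUSE_MATRIX, &rowb, &colb, &B_seq));
    PetscCall(ISDestroy(&rowb));
    PetscCall(ISDestroy(&colb));
    PetscCall(MatDestroy(&B_seq));
*/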
5769 
5770 /*
5771     MatGetBrowsOfAoCols_MPIAIJ - Creates a `MATSEQAIJ` matrix by taking the rows of B that correspond to nonzero columns
5772     of the OFF-DIAGONAL portion of local A
5773 
5774     Collective
5775 
5776    Input Parameters:
5777 +    A,B - the matrices in `MATMPIAIJ` format
5778 -    scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
5779 
5780    Output Parameters:
5781 +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
5782 .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
5783 .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
5784 -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
5785 
5786     Developer Note:
5787     This directly accesses information inside the VecScatter associated with the matrix-vector product
5788      for this matrix. This is not desirable.
5789 
5790     Level: developer
5791 
5792 */
5793 PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A, Mat B, MatReuse scall, PetscInt **startsj_s, PetscInt **startsj_r, MatScalar **bufa_ptr, Mat *B_oth)
5794 {
5795   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data;
5796   Mat_SeqAIJ        *b_oth;
5797   VecScatter         ctx;
5798   MPI_Comm           comm;
5799   const PetscMPIInt *rprocs, *sprocs;
5800   const PetscInt    *srow, *rstarts, *sstarts;
5801   PetscInt          *rowlen, *bufj, *bufJ, ncols = 0, aBn = a->B->cmap->n, row, *b_othi, *b_othj, *rvalues = NULL, *svalues = NULL, *cols, sbs, rbs;
5802   PetscInt           i, j, k = 0, l, ll, nrecvs, nsends, nrows, *rstartsj = NULL, *sstartsj, len;
5803   PetscScalar       *b_otha, *bufa, *bufA, *vals = NULL;
5804   MPI_Request       *reqs = NULL, *rwaits = NULL, *swaits = NULL;
5805   PetscMPIInt        size, tag, rank, nreqs;
5806 
5807   PetscFunctionBegin;
5808   PetscCall(PetscObjectGetComm((PetscObject)A, &comm));
5809   PetscCallMPI(MPI_Comm_size(comm, &size));
5810 
5811   PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
5812              A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
5813   PetscCall(PetscLogEventBegin(MAT_GetBrowsOfAocols, A, B, 0, 0));
5814   PetscCallMPI(MPI_Comm_rank(comm, &rank));
5815 
5816   if (size == 1) {
5817     startsj_s = NULL;
5818     bufa_ptr  = NULL;
5819     *B_oth    = NULL;
5820     PetscFunctionReturn(PETSC_SUCCESS);
5821   }
5822 
5823   ctx = a->Mvctx;
5824   tag = ((PetscObject)ctx)->tag;
5825 
5826   PetscCall(VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &srow, &sprocs, &sbs));
5827   /* rprocs[] must be ordered so that indices received from them are ordered in rvalues[], which is key to algorithms used in this subroutine */
5828   PetscCall(VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL /*indices not needed*/, &rprocs, &rbs));
5829   PetscCall(PetscMPIIntCast(nsends + nrecvs, &nreqs));
5830   PetscCall(PetscMalloc1(nreqs, &reqs));
5831   rwaits = reqs;
5832   swaits = reqs + nrecvs;
5833 
5834   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
5835   if (scall == MAT_INITIAL_MATRIX) {
5836     /* i-array */
5837     /*  post receives */
5838     if (nrecvs) PetscCall(PetscMalloc1(rbs * (rstarts[nrecvs] - rstarts[0]), &rvalues)); /* rstarts can be NULL when nrecvs=0 */
5839     for (i = 0; i < nrecvs; i++) {
5840       rowlen = rvalues + rstarts[i] * rbs;
5841       nrows  = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of indices to be received */
5842       PetscCallMPI(MPI_Irecv(rowlen, nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i));
5843     }
5844 
5845     /* pack the outgoing message */
5846     PetscCall(PetscMalloc2(nsends + 1, &sstartsj, nrecvs + 1, &rstartsj));
5847 
5848     sstartsj[0] = 0;
5849     rstartsj[0] = 0;
5850     len         = 0; /* total length of j or a array to be sent */
5851     if (nsends) {
5852       k = sstarts[0]; /* ATTENTION: sstarts[0] and rstarts[0] are not necessarily zero */
5853       PetscCall(PetscMalloc1(sbs * (sstarts[nsends] - sstarts[0]), &svalues));
5854     }
5855     for (i = 0; i < nsends; i++) {
5856       rowlen = svalues + (sstarts[i] - sstarts[0]) * sbs;
5857       nrows  = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5858       for (j = 0; j < nrows; j++) {
5859         row = srow[k] + B->rmap->range[rank]; /* global row idx */
5860         for (l = 0; l < sbs; l++) {
5861           PetscCall(MatGetRow_MPIAIJ(B, row + l, &ncols, NULL, NULL)); /* rowlength */
5862 
5863           rowlen[j * sbs + l] = ncols;
5864 
5865           len += ncols;
5866           PetscCall(MatRestoreRow_MPIAIJ(B, row + l, &ncols, NULL, NULL));
5867         }
5868         k++;
5869       }
5870       PetscCallMPI(MPI_Isend(rowlen, nrows * sbs, MPIU_INT, sprocs[i], tag, comm, swaits + i));
5871 
5872       sstartsj[i + 1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
5873     }
5874     /* recvs and sends of i-array are completed */
5875     if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5876     PetscCall(PetscFree(svalues));
5877 
5878     /* allocate buffers for sending j and a arrays */
5879     PetscCall(PetscMalloc1(len + 1, &bufj));
5880     PetscCall(PetscMalloc1(len + 1, &bufa));
5881 
5882     /* create i-array of B_oth */
5883     PetscCall(PetscMalloc1(aBn + 2, &b_othi));
5884 
5885     b_othi[0] = 0;
5886     len       = 0; /* total length of j or a array to be received */
5887     k         = 0;
5888     for (i = 0; i < nrecvs; i++) {
5889       rowlen = rvalues + (rstarts[i] - rstarts[0]) * rbs;
5890       nrows  = (rstarts[i + 1] - rstarts[i]) * rbs; /* num of rows to be received */
5891       for (j = 0; j < nrows; j++) {
5892         b_othi[k + 1] = b_othi[k] + rowlen[j];
5893         PetscCall(PetscIntSumError(rowlen[j], len, &len));
5894         k++;
5895       }
5896       rstartsj[i + 1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5897     }
5898     PetscCall(PetscFree(rvalues));
5899 
5900     /* allocate space for j and a arrays of B_oth */
5901     PetscCall(PetscMalloc1(b_othi[aBn] + 1, &b_othj));
5902     PetscCall(PetscMalloc1(b_othi[aBn] + 1, &b_otha));
5903 
5904     /* j-array */
5905     /*  post receives of j-array */
5906     for (i = 0; i < nrecvs; i++) {
5907       nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
5908       PetscCallMPI(MPI_Irecv(b_othj + rstartsj[i], nrows, MPIU_INT, rprocs[i], tag, comm, rwaits + i));
5909     }
5910 
5911     /* pack the outgoing message j-array */
5912     if (nsends) k = sstarts[0];
5913     for (i = 0; i < nsends; i++) {
5914       nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5915       bufJ  = bufj + sstartsj[i];
5916       for (j = 0; j < nrows; j++) {
5917         row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5918         for (ll = 0; ll < sbs; ll++) {
5919           PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
5920           for (l = 0; l < ncols; l++) *bufJ++ = cols[l];
5921           PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, &cols, NULL));
5922         }
5923       }
5924       PetscCallMPI(MPI_Isend(bufj + sstartsj[i], sstartsj[i + 1] - sstartsj[i], MPIU_INT, sprocs[i], tag, comm, swaits + i));
5925     }
5926 
5927     /* recvs and sends of j-array are completed */
5928     if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5929   } else if (scall == MAT_REUSE_MATRIX) {
5930     sstartsj = *startsj_s;
5931     rstartsj = *startsj_r;
5932     bufa     = *bufa_ptr;
5933     b_oth    = (Mat_SeqAIJ *)(*B_oth)->data;
5934     PetscCall(MatSeqAIJGetArrayWrite(*B_oth, &b_otha));
5935   } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix P does not possess an object container");
5936 
5937   /* a-array */
5938   /*  post receives of a-array */
5939   for (i = 0; i < nrecvs; i++) {
5940     nrows = rstartsj[i + 1] - rstartsj[i]; /* length of the msg received */
5941     PetscCallMPI(MPI_Irecv(b_otha + rstartsj[i], nrows, MPIU_SCALAR, rprocs[i], tag, comm, rwaits + i));
5942   }
5943 
5944   /* pack the outgoing message a-array */
5945   if (nsends) k = sstarts[0];
5946   for (i = 0; i < nsends; i++) {
5947     nrows = sstarts[i + 1] - sstarts[i]; /* num of block rows */
5948     bufA  = bufa + sstartsj[i];
5949     for (j = 0; j < nrows; j++) {
5950       row = srow[k++] + B->rmap->range[rank]; /* global row idx */
5951       for (ll = 0; ll < sbs; ll++) {
5952         PetscCall(MatGetRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
5953         for (l = 0; l < ncols; l++) *bufA++ = vals[l];
5954         PetscCall(MatRestoreRow_MPIAIJ(B, row + ll, &ncols, NULL, &vals));
5955       }
5956     }
5957     PetscCallMPI(MPI_Isend(bufa + sstartsj[i], sstartsj[i + 1] - sstartsj[i], MPIU_SCALAR, sprocs[i], tag, comm, swaits + i));
5958   }
5959   /* recvs and sends of a-array are completed */
5960   if (nreqs) PetscCallMPI(MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE));
5961   PetscCall(PetscFree(reqs));
5962 
5963   if (scall == MAT_INITIAL_MATRIX) {
5964     /* put together the new matrix */
5965     PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, aBn, B->cmap->N, b_othi, b_othj, b_otha, B_oth));
5966 
5967     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5968     /* Since these are PETSc arrays, change flags to free them as necessary. */
5969     b_oth          = (Mat_SeqAIJ *)(*B_oth)->data;
5970     b_oth->free_a  = PETSC_TRUE;
5971     b_oth->free_ij = PETSC_TRUE;
5972     b_oth->nonew   = 0;
5973 
5974     PetscCall(PetscFree(bufj));
5975     if (!startsj_s || !bufa_ptr) {
5976       PetscCall(PetscFree2(sstartsj, rstartsj));
5977       PetscCall(PetscFree(bufa_ptr));
5978     } else {
5979       *startsj_s = sstartsj;
5980       *startsj_r = rstartsj;
5981       *bufa_ptr  = bufa;
5982     }
5983   } else if (scall == MAT_REUSE_MATRIX) {
5984     PetscCall(MatSeqAIJRestoreArrayWrite(*B_oth, &b_otha));
5985   }
5986 
5987   PetscCall(VecScatterRestoreRemote_Private(ctx, PETSC_TRUE, &nsends, &sstarts, &srow, &sprocs, &sbs));
5988   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE, &nrecvs, &rstarts, NULL, &rprocs, &rbs));
5989   PetscCall(PetscLogEventEnd(MAT_GetBrowsOfAocols, A, B, 0, 0));
5990   PetscFunctionReturn(PETSC_SUCCESS);
5991 }
5992 
5993 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat, MatType, MatReuse, Mat *);
5994 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat, MatType, MatReuse, Mat *);
5995 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJSELL(Mat, MatType, MatReuse, Mat *);
5996 #if defined(PETSC_HAVE_MKL_SPARSE)
5997 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJMKL(Mat, MatType, MatReuse, Mat *);
5998 #endif
5999 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIBAIJ(Mat, MatType, MatReuse, Mat *);
6000 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat, MatType, MatReuse, Mat *);
6001 #if defined(PETSC_HAVE_ELEMENTAL)
6002 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat, MatType, MatReuse, Mat *);
6003 #endif
6004 #if defined(PETSC_HAVE_SCALAPACK)
6005 PETSC_INTERN PetscErrorCode MatConvert_AIJ_ScaLAPACK(Mat, MatType, MatReuse, Mat *);
6006 #endif
6007 #if defined(PETSC_HAVE_HYPRE)
6008 PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat, MatType, MatReuse, Mat *);
6009 #endif
6010 #if defined(PETSC_HAVE_CUDA)
6011 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCUSPARSE(Mat, MatType, MatReuse, Mat *);
6012 #endif
6013 #if defined(PETSC_HAVE_HIP)
6014 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJHIPSPARSE(Mat, MatType, MatReuse, Mat *);
6015 #endif
6016 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6017 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJKokkos(Mat, MatType, MatReuse, Mat *);
6018 #endif
6019 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat, MatType, MatReuse, Mat *);
6020 PETSC_INTERN PetscErrorCode MatConvert_XAIJ_IS(Mat, MatType, MatReuse, Mat *);
6021 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_IS_XAIJ(Mat);
6022 
6023 /*
6024     Computes (B'*A')' since computing B*A directly is untenable
6025 
6026                n                       p                          p
6027         [             ]       [             ]         [                 ]
6028       m [      A      ]  *  n [       B     ]   =   m [         C       ]
6029         [             ]       [             ]         [                 ]
6030 
6031 */
6032 static PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A, Mat B, Mat C)
6033 {
6034   Mat At, Bt, Ct;
6035 
6036   PetscFunctionBegin;
6037   PetscCall(MatTranspose(A, MAT_INITIAL_MATRIX, &At));
6038   PetscCall(MatTranspose(B, MAT_INITIAL_MATRIX, &Bt));
6039   PetscCall(MatMatMult(Bt, At, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &Ct));
6040   PetscCall(MatDestroy(&At));
6041   PetscCall(MatDestroy(&Bt));
6042   PetscCall(MatTransposeSetPrecursor(Ct, C));
6043   PetscCall(MatTranspose(Ct, MAT_REUSE_MATRIX, &C));
6044   PetscCall(MatDestroy(&Ct));
6045   PetscFunctionReturn(PETSC_SUCCESS);
6046 }
6047 
6048 static PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A, Mat B, PetscReal fill, Mat C)
6049 {
6050   PetscBool cisdense;
6051 
6052   PetscFunctionBegin;
6053   PetscCheck(A->cmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "A->cmap->n %" PetscInt_FMT " != B->rmap->n %" PetscInt_FMT, A->cmap->n, B->rmap->n);
6054   PetscCall(MatSetSizes(C, A->rmap->n, B->cmap->n, A->rmap->N, B->cmap->N));
6055   PetscCall(MatSetBlockSizesFromMats(C, A, B));
6056   PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &cisdense, MATMPIDENSE, MATMPIDENSECUDA, MATMPIDENSEHIP, ""));
6057   if (!cisdense) PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
6058   PetscCall(MatSetUp(C));
6059 
6060   C->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
6061   PetscFunctionReturn(PETSC_SUCCESS);
6062 }
6063 
6064 static PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ_AB(Mat C)
6065 {
6066   Mat_Product *product = C->product;
6067   Mat          A = product->A, B = product->B;
6068 
6069   PetscFunctionBegin;
6070   PetscCheck(A->cmap->rstart == B->rmap->rstart && A->cmap->rend == B->rmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",
6071              A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);
6072   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIDense_MPIAIJ;
6073   C->ops->productsymbolic = MatProductSymbolic_AB;
6074   PetscFunctionReturn(PETSC_SUCCESS);
6075 }
6076 
6077 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIDense_MPIAIJ(Mat C)
6078 {
6079   Mat_Product *product = C->product;
6080 
6081   PetscFunctionBegin;
6082   if (product->type == MATPRODUCT_AB) PetscCall(MatProductSetFromOptions_MPIDense_MPIAIJ_AB(C));
6083   PetscFunctionReturn(PETSC_SUCCESS);
6084 }
6085 
6086 /*
6087    Merge two sets of sorted nonzeros and return a CSR for the merged (sequential) matrix
6088 
6089   Input Parameters:
6090 
6091     j1,rowBegin1,rowEnd1,jmap1: describe the first set of nonzeros (Set1)
6092     j2,rowBegin2,rowEnd2,jmap2: describe the second set of nonzeros (Set2)
6093 
6094     mat: both sets' nonzeros are on m rows, where m is the number of local rows of the matrix mat
6095 
6096     For Set1, j1[] contains column indices of the nonzeros.
6097     For the k-th row (0<=k<m), [rowBegin1[k],rowEnd1[k]) index into j1[] and point to the begin/end nonzero in row k
6098     respectively (note rowEnd1[k] is not necessarily equal to rowBegin1[k+1]). Indices in this range of j1[] are sorted,
6099     but might have repeats. jmap1[t+1] - jmap1[t] is the number of repeats for the t-th unique nonzero in Set1.
6100 
6101     Similar for Set2.
6102 
6103     This routine merges the two sets of nonzeros row by row and removes repeats.
6104 
6105   Output Parameters: (memory is allocated by the caller)
6106 
6107     i[],j[]: the CSR of the merged matrix, which has m rows.
6108     imap1[]: the k-th unique nonzero in Set1 (k=0,1,...) corresponds to imap1[k]-th unique nonzero in the merged matrix.
6109     imap2[]: similar to imap1[], but for Set2.
6110     Note we order nonzeros row-by-row and from left to right.
6111 */
6112 static PetscErrorCode MatMergeEntries_Internal(Mat mat, const PetscInt j1[], const PetscInt j2[], const PetscCount rowBegin1[], const PetscCount rowEnd1[], const PetscCount rowBegin2[], const PetscCount rowEnd2[], const PetscCount jmap1[], const PetscCount jmap2[], PetscCount imap1[], PetscCount imap2[], PetscInt i[], PetscInt j[])
6113 {
6114   PetscInt   r, m; /* Row index of mat */
6115   PetscCount t, t1, t2, b1, e1, b2, e2;
6116 
6117   PetscFunctionBegin;
6118   PetscCall(MatGetLocalSize(mat, &m, NULL));
6119   t1 = t2 = t = 0; /* Count unique nonzeros in Set1, Set2 and the merged matrix, respectively */
6120   i[0]        = 0;
6121   for (r = 0; r < m; r++) { /* Do row by row merging */
6122     b1 = rowBegin1[r];
6123     e1 = rowEnd1[r];
6124     b2 = rowBegin2[r];
6125     e2 = rowEnd2[r];
6126     while (b1 < e1 && b2 < e2) {
6127       if (j1[b1] == j2[b2]) { /* Same column index and hence same nonzero */
6128         j[t]      = j1[b1];
6129         imap1[t1] = t;
6130         imap2[t2] = t;
6131         b1 += jmap1[t1 + 1] - jmap1[t1]; /* Jump to next unique local nonzero */
6132         b2 += jmap2[t2 + 1] - jmap2[t2]; /* Jump to next unique remote nonzero */
6133         t1++;
6134         t2++;
6135         t++;
6136       } else if (j1[b1] < j2[b2]) {
6137         j[t]      = j1[b1];
6138         imap1[t1] = t;
6139         b1 += jmap1[t1 + 1] - jmap1[t1];
6140         t1++;
6141         t++;
6142       } else {
6143         j[t]      = j2[b2];
6144         imap2[t2] = t;
6145         b2 += jmap2[t2 + 1] - jmap2[t2];
6146         t2++;
6147         t++;
6148       }
6149     }
6150     /* Merge the remaining in either j1[] or j2[] */
6151     while (b1 < e1) {
6152       j[t]      = j1[b1];
6153       imap1[t1] = t;
6154       b1 += jmap1[t1 + 1] - jmap1[t1];
6155       t1++;
6156       t++;
6157     }
6158     while (b2 < e2) {
6159       j[t]      = j2[b2];
6160       imap2[t2] = t;
6161       b2 += jmap2[t2 + 1] - jmap2[t2];
6162       t2++;
6163       t++;
6164     }
6165     i[r + 1] = t;
6166   }
6167   PetscFunctionReturn(PETSC_SUCCESS);
6168 }
6169 
6170 /*
6171   Split nonzeros in a block of local rows into two subsets: those in the diagonal block and those in the off-diagonal block
6172 
6173   Input Parameters:
6174     mat: an MPI matrix that provides row and column layout information for splitting. Let's say its number of local rows is m.
6175     n,i[],j[],perm[]: there are n input entries, belonging to m rows. Row/col indices of the entries are stored in i[] and j[]
6176       respectively, along with a permutation array perm[]. Length of the i[],j[],perm[] arrays is n.
6177 
6178       i[] is already sorted, but within a row, j[] is not sorted and might have repeats.
6179       i[] might contain negative indices at the beginning, which means the corresponding entries should be ignored in the splitting.
6180 
6181   Output Parameters:
6182     j[],perm[]: the routine needs to sort j[] within each row along with perm[].
6183     rowBegin[],rowMid[],rowEnd[]: of length m, and the memory is preallocated and zeroed by the caller.
6184       They contain indices pointing to j[]. For 0<=r<m, [rowBegin[r],rowMid[r]) point to begin/end entries of row r of the diagonal block,
6185       and [rowMid[r],rowEnd[r]) point to begin/end entries of row r of the off-diagonal block.
6186 
6187     Aperm[],Ajmap[],Atot,Annz: Arrays are allocated by this routine.
6188       Atot: number of entries belonging to the diagonal block.
6189       Annz: number of unique nonzeros belonging to the diagonal block.
6190       Aperm[Atot] stores values from perm[] for entries belonging to the diagonal block. Length of Aperm[] is Atot, though it may also count
6191         repeats (i.e., same 'i,j' pair).
6192       Ajmap[Annz+1] stores the number of repeats of each unique entry belonging to the diagonal block. More precisely, Ajmap[t+1] - Ajmap[t]
6193         is the number of repeats for the t-th unique entry in the diagonal block. Ajmap[0] is always 0.
6194 
6198     Bperm[], Bjmap[], Btot, Bnnz are similar but for the off-diagonal block.
6199 
6200     Aperm[],Bperm[],Ajmap[] and Bjmap[] are allocated separately by this routine with PetscMalloc1().
6201 */
6202 static PetscErrorCode MatSplitEntries_Internal(Mat mat, PetscCount n, const PetscInt i[], PetscInt j[], PetscCount perm[], PetscCount rowBegin[], PetscCount rowMid[], PetscCount rowEnd[], PetscCount *Atot_, PetscCount **Aperm_, PetscCount *Annz_, PetscCount **Ajmap_, PetscCount *Btot_, PetscCount **Bperm_, PetscCount *Bnnz_, PetscCount **Bjmap_)
6203 {
6204   PetscInt    cstart, cend, rstart, rend, row, col;
6205   PetscCount  Atot = 0, Btot = 0; /* Total number of nonzeros in the diagonal and off-diagonal blocks */
6206   PetscCount  Annz = 0, Bnnz = 0; /* Number of unique nonzeros in the diagonal and off-diagonal blocks */
6207   PetscCount  k, m, p, q, r, s, mid;
6208   PetscCount *Aperm, *Bperm, *Ajmap, *Bjmap;
6209 
6210   PetscFunctionBegin;
6211   PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend));
6212   PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend));
6213   m = rend - rstart;
6214 
6215   /* Skip negative rows */
6216   for (k = 0; k < n; k++)
6217     if (i[k] >= 0) break;
6218 
6219   /* Process [k,n): sort and partition each local row into diag and offdiag portions,
6220      fill rowBegin[], rowMid[], rowEnd[], and count Atot, Btot, Annz, Bnnz.
6221   */
6222   while (k < n) {
6223     row = i[k];
6224     /* Entries in [k,s) are in one row. Shift diagonal block col indices so that diag is ahead of offdiag after sorting the row */
6225     for (s = k; s < n; s++)
6226       if (i[s] != row) break;
6227 
6228     /* Shift diag columns to range of [-PETSC_MAX_INT, -1] */
6229     for (p = k; p < s; p++) {
6230       if (j[p] >= cstart && j[p] < cend) j[p] -= PETSC_MAX_INT;
6231       else PetscAssert((j[p] >= 0) && (j[p] <= mat->cmap->N), PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column index %" PetscInt_FMT " is out of range", j[p]);
6232     }
6233     PetscCall(PetscSortIntWithCountArray(s - k, j + k, perm + k));
6234     PetscCall(PetscSortedIntUpperBound(j, k, s, -1, &mid)); /* Separate [k,s) into [k,mid) for diag and [mid,s) for offdiag */
6235     rowBegin[row - rstart] = k;
6236     rowMid[row - rstart]   = mid;
6237     rowEnd[row - rstart]   = s;
6238 
6239     /* Count nonzeros of this diag/offdiag row, which might have repeats */
6240     Atot += mid - k;
6241     Btot += s - mid;
6242 
6243     /* Count unique nonzeros of this diag row */
6244     for (p = k; p < mid;) {
6245       col = j[p];
6246       do {
6247         j[p] += PETSC_MAX_INT; /* Revert the modified diagonal indices */
6248         p++;
6249       } while (p < mid && j[p] == col);
6250       Annz++;
6251     }
6252 
6253     /* Count unique nonzeros of this offdiag row */
6254     for (p = mid; p < s;) {
6255       col = j[p];
6256       do {
6257         p++;
6258       } while (p < s && j[p] == col);
6259       Bnnz++;
6260     }
6261     k = s;
6262   }
6263 
6264   /* Allocation according to Atot, Btot, Annz, Bnnz */
6265   PetscCall(PetscMalloc1(Atot, &Aperm));
6266   PetscCall(PetscMalloc1(Btot, &Bperm));
6267   PetscCall(PetscMalloc1(Annz + 1, &Ajmap));
6268   PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap));
6269 
6270   /* Re-scan indices and copy diag/offdiag permutation indices to Aperm, Bperm and also fill Ajmap and Bjmap */
6271   Ajmap[0] = Bjmap[0] = Atot = Btot = Annz = Bnnz = 0;
6272   for (r = 0; r < m; r++) {
6273     k   = rowBegin[r];
6274     mid = rowMid[r];
6275     s   = rowEnd[r];
6276     PetscCall(PetscArraycpy(Aperm + Atot, perm + k, mid - k));
6277     PetscCall(PetscArraycpy(Bperm + Btot, perm + mid, s - mid));
6278     Atot += mid - k;
6279     Btot += s - mid;
6280 
6281     /* Scan column indices in this row and find out how many repeats each unique nonzero has */
6282     for (p = k; p < mid;) {
6283       col = j[p];
6284       q   = p;
6285       do {
6286         p++;
6287       } while (p < mid && j[p] == col);
6288       Ajmap[Annz + 1] = Ajmap[Annz] + (p - q);
6289       Annz++;
6290     }
6291 
6292     for (p = mid; p < s;) {
6293       col = j[p];
6294       q   = p;
6295       do {
6296         p++;
6297       } while (p < s && j[p] == col);
6298       Bjmap[Bnnz + 1] = Bjmap[Bnnz] + (p - q);
6299       Bnnz++;
6300     }
6301   }
6302   /* Output */
6303   *Aperm_ = Aperm;
6304   *Annz_  = Annz;
6305   *Atot_  = Atot;
6306   *Ajmap_ = Ajmap;
6307   *Bperm_ = Bperm;
6308   *Bnnz_  = Bnnz;
6309   *Btot_  = Btot;
6310   *Bjmap_ = Bjmap;
6311   PetscFunctionReturn(PETSC_SUCCESS);
6312 }
6313 
6314 /*
6315   Expand the jmap[] array to make a new one in view of nonzeros in the merged matrix
6316 
6317   Input Parameters:
6318     nnz1: number of unique nonzeros in a set that was used to produce imap[], jmap[]
6319     nnz:  number of unique nonzeros in the merged matrix
6320     imap[nnz1]: i-th nonzero in the set is the imap[i]-th nonzero in the merged matrix
6321     jmap[nnz1+1]: i-th nonzero in the set has jmap[i+1] - jmap[i] repeats in the set
6322 
6323   Output Parameter: (memory is allocated by the caller)
6324     jmap_new[nnz+1]: i-th nonzero in the merged matrix has jmap_new[i+1] - jmap_new[i] repeats in the set
6325 
6326   Example:
6327     nnz1 = 4
6328     nnz  = 6
6329     imap = [1,3,4,5]
6330     jmap = [0,3,5,6,7]
6331    then,
6332     jmap_new = [0,0,3,3,5,6,7]
6333 */
6334 static PetscErrorCode ExpandJmap_Internal(PetscCount nnz1, PetscCount nnz, const PetscCount imap[], const PetscCount jmap[], PetscCount jmap_new[])
6335 {
6336   PetscCount k, p;
6337 
6338   PetscFunctionBegin;
6339   jmap_new[0] = 0;
6340   p           = nnz;                /* p loops over jmap_new[] backwards */
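       /* Trace of the example in the header comment: with imap = {1,3,4,5} and jmap = {0,3,5,6,7},
          k = 3 fills jmap_new[6] = jmap[4] = 7, k = 2 fills jmap_new[5] = jmap[3] = 6, k = 1 fills
          jmap_new[4] = jmap[2] = 5, k = 0 fills jmap_new[3] = jmap_new[2] = jmap[1] = 3, and the
          final loop fills jmap_new[1] = jmap_new[0] = jmap[0] = 0, giving {0,0,3,3,5,6,7} */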
6341   for (k = nnz1 - 1; k >= 0; k--) { /* k loops over imap[] */
6342     for (; p > imap[k]; p--) jmap_new[p] = jmap[k + 1];
6343   }
6344   for (; p >= 0; p--) jmap_new[p] = jmap[0];
6345   PetscFunctionReturn(PETSC_SUCCESS);
6346 }
6347 
6348 static PetscErrorCode MatCOOStructDestroy_MPIAIJ(void *data)
6349 {
6350   MatCOOStruct_MPIAIJ *coo = (MatCOOStruct_MPIAIJ *)data;
6351 
6352   PetscFunctionBegin;
6353   PetscCall(PetscSFDestroy(&coo->sf));
6354   PetscCall(PetscFree(coo->Aperm1));
6355   PetscCall(PetscFree(coo->Bperm1));
6356   PetscCall(PetscFree(coo->Ajmap1));
6357   PetscCall(PetscFree(coo->Bjmap1));
6358   PetscCall(PetscFree(coo->Aimap2));
6359   PetscCall(PetscFree(coo->Bimap2));
6360   PetscCall(PetscFree(coo->Aperm2));
6361   PetscCall(PetscFree(coo->Bperm2));
6362   PetscCall(PetscFree(coo->Ajmap2));
6363   PetscCall(PetscFree(coo->Bjmap2));
6364   PetscCall(PetscFree(coo->Cperm1));
6365   PetscCall(PetscFree2(coo->sendbuf, coo->recvbuf));
6366   PetscCall(PetscFree(coo));
6367   PetscFunctionReturn(PETSC_SUCCESS);
6368 }
6369 
6370 PetscErrorCode MatSetPreallocationCOO_MPIAIJ(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[])
6371 {
6372   MPI_Comm             comm;
6373   PetscMPIInt          rank, size;
6374   PetscInt             m, n, M, N, rstart, rend, cstart, cend; /* Sizes, indices of row/col, therefore with type PetscInt */
6375   PetscCount           k, p, q, rem;                           /* Loop variables over coo arrays */
6376   Mat_MPIAIJ          *mpiaij = (Mat_MPIAIJ *)mat->data;
6377   PetscContainer       container;
6378   MatCOOStruct_MPIAIJ *coo;
6379 
6380   PetscFunctionBegin;
6381   PetscCall(PetscFree(mpiaij->garray));
6382   PetscCall(VecDestroy(&mpiaij->lvec));
6383 #if defined(PETSC_USE_CTABLE)
6384   PetscCall(PetscHMapIDestroy(&mpiaij->colmap));
6385 #else
6386   PetscCall(PetscFree(mpiaij->colmap));
6387 #endif
6388   PetscCall(VecScatterDestroy(&mpiaij->Mvctx));
6389   mat->assembled     = PETSC_FALSE;
6390   mat->was_assembled = PETSC_FALSE;
6391 
6392   PetscCall(PetscObjectGetComm((PetscObject)mat, &comm));
6393   PetscCallMPI(MPI_Comm_size(comm, &size));
6394   PetscCallMPI(MPI_Comm_rank(comm, &rank));
6395   PetscCall(PetscLayoutSetUp(mat->rmap));
6396   PetscCall(PetscLayoutSetUp(mat->cmap));
6397   PetscCall(PetscLayoutGetRange(mat->rmap, &rstart, &rend));
6398   PetscCall(PetscLayoutGetRange(mat->cmap, &cstart, &cend));
6399   PetscCall(MatGetLocalSize(mat, &m, &n));
6400   PetscCall(MatGetSize(mat, &M, &N));
6401 
6402   /* Sort (i,j) by row along with a permutation array, so that the to-be-ignored */
6403   /* entries come first, then local rows, then remote rows.                     */
6404   PetscCount n1 = coo_n, *perm1;
6405   PetscInt  *i1 = coo_i, *j1 = coo_j;
6406 
6407   PetscCall(PetscMalloc1(n1, &perm1));
6408   for (k = 0; k < n1; k++) perm1[k] = k;
6409 
6410   /* Manipulate indices so that entries with negative row or col indices will have smallest
6411      row indices, local entries will have greater but negative row indices, and remote entries
6412      will have positive row indices.
6413   */
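       /* For example (made-up layout): with rstart = 4 and rend = 8, an entry with a negative index gets
          row PETSC_MIN_INT, local row 5 becomes 5 - PETSC_MAX_INT (negative, but still larger than
          PETSC_MIN_INT), and remote row 9 stays 9, so the sort below groups the three classes in exactly
          that order */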
6414   for (k = 0; k < n1; k++) {
6415     if (i1[k] < 0 || j1[k] < 0) i1[k] = PETSC_MIN_INT;                /* e.g., -2^31, minimal to move them ahead */
6416     else if (i1[k] >= rstart && i1[k] < rend) i1[k] -= PETSC_MAX_INT; /* e.g., minus 2^31-1 to shift local rows to range of [-PETSC_MAX_INT, -1] */
6417     else {
6418       PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "MAT_NO_OFF_PROC_ENTRIES is set but entries are being inserted into remote rows");
6419       if (mpiaij->donotstash) i1[k] = PETSC_MIN_INT; /* Ignore offproc entries as if they had negative indices */
6420     }
6421   }
6422 
6423   /* Sort by row; after that, [0,k) have ignored entries, [k,rem) have local rows and [rem,n1) have remote rows */
6424   PetscCall(PetscSortIntWithIntCountArrayPair(n1, i1, j1, perm1));
6425 
6426   /* Advance k to the first entry we need to take care of */
6427   for (k = 0; k < n1; k++)
6428     if (i1[k] > PETSC_MIN_INT) break;
6429   PetscCount i1start = k;
6430 
6431   PetscCall(PetscSortedIntUpperBound(i1, k, n1, rend - 1 - PETSC_MAX_INT, &rem)); /* rem is upper bound of the last local row */
6432   for (; k < rem; k++) i1[k] += PETSC_MAX_INT;                                    /* Revert row indices of local rows */
6433 
6434   /*           Send remote rows to their owner                                  */
6435   /* Find which rows should be sent to which remote ranks */
6436   PetscInt        nsend = 0; /* Number of MPI ranks to send data to */
6437   PetscMPIInt    *sendto;    /* [nsend], storing remote ranks */
6438   PetscInt       *nentries;  /* [nsend], storing number of entries sent to remote ranks; Assume PetscInt is big enough for this count, and error if not */
6439   const PetscInt *ranges;
6440   PetscInt        maxNsend = size >= 128 ? 128 : size; /* Assume max 128 neighbors; realloc when needed */
6441 
6442   PetscCall(PetscLayoutGetRanges(mat->rmap, &ranges));
6443   PetscCall(PetscMalloc2(maxNsend, &sendto, maxNsend, &nentries));
6444   for (k = rem; k < n1;) {
6445     PetscMPIInt owner;
6446     PetscInt    firstRow, lastRow;
6447 
6448     /* Locate a row range */
6449     firstRow = i1[k]; /* first row of this owner */
6450     PetscCall(PetscLayoutFindOwner(mat->rmap, firstRow, &owner));
6451     lastRow = ranges[owner + 1] - 1; /* last row of this owner */
6452 
6453     /* Find the first index 'p' in [k,n) with i[p] belonging to next owner */
6454     PetscCall(PetscSortedIntUpperBound(i1, k, n1, lastRow, &p));
6455 
6456     /* All entries in [k,p) belong to this remote owner */
6457     if (nsend >= maxNsend) { /* Double the remote ranks arrays if not long enough */
6458       PetscMPIInt *sendto2;
6459       PetscInt    *nentries2;
6460       PetscInt     maxNsend2 = (maxNsend <= size / 2) ? maxNsend * 2 : size;
6461 
6462       PetscCall(PetscMalloc2(maxNsend2, &sendto2, maxNsend2, &nentries2));
6463       PetscCall(PetscArraycpy(sendto2, sendto, maxNsend));
6464       PetscCall(PetscArraycpy(nentries2, nentries, maxNsend));
6465       PetscCall(PetscFree2(sendto, nentries));
6466       sendto   = sendto2;
6467       nentries = nentries2;
6468       maxNsend = maxNsend2;
6469     }
6470     sendto[nsend]   = owner;
6471     nentries[nsend] = p - k;
6472     PetscCall(PetscCountCast(p - k, &nentries[nsend]));
6473     nsend++;
6474     k = p;
6475   }
6476 
6477   /* Build 1st SF to know offsets on remote to send data */
6478   PetscSF      sf1;
6479   PetscInt     nroots = 1, nroots2 = 0;
6480   PetscInt     nleaves = nsend, nleaves2 = 0;
6481   PetscInt    *offsets;
6482   PetscSFNode *iremote;
6483 
6484   PetscCall(PetscSFCreate(comm, &sf1));
6485   PetscCall(PetscMalloc1(nsend, &iremote));
6486   PetscCall(PetscMalloc1(nsend, &offsets));
6487   for (k = 0; k < nsend; k++) {
6488     iremote[k].rank  = sendto[k];
6489     iremote[k].index = 0;
6490     nleaves2 += nentries[k];
6491     PetscCheck(nleaves2 >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF leaves is too large for PetscInt");
6492   }
6493   PetscCall(PetscSFSetGraph(sf1, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
6494   PetscCall(PetscSFFetchAndOpWithMemTypeBegin(sf1, MPIU_INT, PETSC_MEMTYPE_HOST, &nroots2 /*rootdata*/, PETSC_MEMTYPE_HOST, nentries /*leafdata*/, PETSC_MEMTYPE_HOST, offsets /*leafupdate*/, MPI_SUM));
6495   PetscCall(PetscSFFetchAndOpEnd(sf1, MPIU_INT, &nroots2, nentries, offsets, MPI_SUM)); /* Would nroots2 overflow, we check offsets[] below */
6496   PetscCall(PetscSFDestroy(&sf1));
6497   PetscAssert(nleaves2 == n1 - rem, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nleaves2 %" PetscInt_FMT " != number of remote entries %" PetscCount_FMT "", nleaves2, n1 - rem);
6498 
6499   /* Build 2nd SF to send remote COOs to their owner */
6500   PetscSF sf2;
6501   nroots  = nroots2;
6502   nleaves = nleaves2;
6503   PetscCall(PetscSFCreate(comm, &sf2));
6504   PetscCall(PetscSFSetFromOptions(sf2));
6505   PetscCall(PetscMalloc1(nleaves, &iremote));
6506   p = 0;
6507   for (k = 0; k < nsend; k++) {
6508     PetscCheck(offsets[k] >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Number of SF roots is too large for PetscInt");
6509     for (q = 0; q < nentries[k]; q++, p++) {
6510       iremote[p].rank  = sendto[k];
6511       iremote[p].index = offsets[k] + q;
6512     }
6513   }
6514   PetscCall(PetscSFSetGraph(sf2, nroots, nleaves, NULL, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
6515 
6516   /* Send the remote COOs to their owner */
6517   PetscInt    n2 = nroots, *i2, *j2; /* Buffers for received COOs from other ranks, along with a permutation array */
6518   PetscCount *perm2;                 /* Though PetscInt is enough for remote entries, we use PetscCount here as we want to reuse MatSplitEntries_Internal() */
6519   PetscCall(PetscMalloc3(n2, &i2, n2, &j2, n2, &perm2));
6520   PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, i1 + rem, PETSC_MEMTYPE_HOST, i2, MPI_REPLACE));
6521   PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, i1 + rem, i2, MPI_REPLACE));
6522   PetscCall(PetscSFReduceWithMemTypeBegin(sf2, MPIU_INT, PETSC_MEMTYPE_HOST, j1 + rem, PETSC_MEMTYPE_HOST, j2, MPI_REPLACE));
6523   PetscCall(PetscSFReduceEnd(sf2, MPIU_INT, j1 + rem, j2, MPI_REPLACE));
6524 
6525   PetscCall(PetscFree(offsets));
6526   PetscCall(PetscFree2(sendto, nentries));
6527 
6528   /* Sort received COOs by row along with the permutation array     */
6529   for (k = 0; k < n2; k++) perm2[k] = k;
6530   PetscCall(PetscSortIntWithIntCountArrayPair(n2, i2, j2, perm2));
6531 
6532   /* sf2 only sends contiguous leafdata to contiguous rootdata. We record the permutation which will be used to fill leafdata */
6533   PetscCount *Cperm1;
6534   PetscCall(PetscMalloc1(nleaves, &Cperm1));
6535   PetscCall(PetscArraycpy(Cperm1, perm1 + rem, nleaves));
6536 
6537   /* Support for HYPRE matrices, kind of a hack.
6538      Swap min column with diagonal so that diagonal values will go first */
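       /* Made-up example of the swap: if a local row has a diagonal entry and its smallest diag-block
          column c is smaller than the diagonal column, the entry at column c is relabeled with the
          diagonal column and the diagonal entry is relabeled with column c, so after the rows are sorted
          in MatSplitEntries_Internal() the diagonal value is stored first in the row, which is the
          layout this hack provides for hypre; rows without a diagonal entry (tracked in hasdiag below)
          are left untouched */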
6539   PetscBool   hypre;
6540   const char *name;
6541   PetscCall(PetscObjectGetName((PetscObject)mat, &name));
6542   PetscCall(PetscStrcmp("_internal_COO_mat_for_hypre", name, &hypre));
6543   if (hypre) {
6544     PetscInt *minj;
6545     PetscBT   hasdiag;
6546 
6547     PetscCall(PetscBTCreate(m, &hasdiag));
6548     PetscCall(PetscMalloc1(m, &minj));
6549     for (k = 0; k < m; k++) minj[k] = PETSC_MAX_INT;
6550     for (k = i1start; k < rem; k++) {
6551       if (j1[k] < cstart || j1[k] >= cend) continue;
6552       const PetscInt rindex = i1[k] - rstart;
6553       if ((j1[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex));
6554       minj[rindex] = PetscMin(minj[rindex], j1[k]);
6555     }
6556     for (k = 0; k < n2; k++) {
6557       if (j2[k] < cstart || j2[k] >= cend) continue;
6558       const PetscInt rindex = i2[k] - rstart;
6559       if ((j2[k] - cstart) == rindex) PetscCall(PetscBTSet(hasdiag, rindex));
6560       minj[rindex] = PetscMin(minj[rindex], j2[k]);
6561     }
6562     for (k = i1start; k < rem; k++) {
6563       const PetscInt rindex = i1[k] - rstart;
6564       if (j1[k] < cstart || j1[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue;
6565       if (j1[k] == minj[rindex]) j1[k] = i1[k] + (cstart - rstart);
6566       else if ((j1[k] - cstart) == rindex) j1[k] = minj[rindex];
6567     }
6568     for (k = 0; k < n2; k++) {
6569       const PetscInt rindex = i2[k] - rstart;
6570       if (j2[k] < cstart || j2[k] >= cend || !PetscBTLookup(hasdiag, rindex)) continue;
6571       if (j2[k] == minj[rindex]) j2[k] = i2[k] + (cstart - rstart);
6572       else if ((j2[k] - cstart) == rindex) j2[k] = minj[rindex];
6573     }
6574     PetscCall(PetscBTDestroy(&hasdiag));
6575     PetscCall(PetscFree(minj));
6576   }
6577 
6578   /* Split local COOs and received COOs into diag/offdiag portions */
6579   PetscCount *rowBegin1, *rowMid1, *rowEnd1;
6580   PetscCount *Ajmap1, *Aperm1, *Bjmap1, *Bperm1;
6581   PetscCount  Annz1, Bnnz1, Atot1, Btot1;
6582   PetscCount *rowBegin2, *rowMid2, *rowEnd2;
6583   PetscCount *Ajmap2, *Aperm2, *Bjmap2, *Bperm2;
6584   PetscCount  Annz2, Bnnz2, Atot2, Btot2;
6585 
6586   PetscCall(PetscCalloc3(m, &rowBegin1, m, &rowMid1, m, &rowEnd1));
6587   PetscCall(PetscCalloc3(m, &rowBegin2, m, &rowMid2, m, &rowEnd2));
6588   PetscCall(MatSplitEntries_Internal(mat, rem, i1, j1, perm1, rowBegin1, rowMid1, rowEnd1, &Atot1, &Aperm1, &Annz1, &Ajmap1, &Btot1, &Bperm1, &Bnnz1, &Bjmap1));
6589   PetscCall(MatSplitEntries_Internal(mat, n2, i2, j2, perm2, rowBegin2, rowMid2, rowEnd2, &Atot2, &Aperm2, &Annz2, &Ajmap2, &Btot2, &Bperm2, &Bnnz2, &Bjmap2));
6590 
6591   /* Merge local COOs with received COOs: diag with diag, offdiag with offdiag */
6592   PetscInt *Ai, *Bi;
6593   PetscInt *Aj, *Bj;
6594 
6595   PetscCall(PetscMalloc1(m + 1, &Ai));
6596   PetscCall(PetscMalloc1(m + 1, &Bi));
6597   PetscCall(PetscMalloc1(Annz1 + Annz2, &Aj)); /* Since local and remote entries might have dups, we might allocate excess memory */
6598   PetscCall(PetscMalloc1(Bnnz1 + Bnnz2, &Bj));
6599 
6600   PetscCount *Aimap1, *Bimap1, *Aimap2, *Bimap2;
6601   PetscCall(PetscMalloc1(Annz1, &Aimap1));
6602   PetscCall(PetscMalloc1(Bnnz1, &Bimap1));
6603   PetscCall(PetscMalloc1(Annz2, &Aimap2));
6604   PetscCall(PetscMalloc1(Bnnz2, &Bimap2));
6605 
6606   PetscCall(MatMergeEntries_Internal(mat, j1, j2, rowBegin1, rowMid1, rowBegin2, rowMid2, Ajmap1, Ajmap2, Aimap1, Aimap2, Ai, Aj));
6607   PetscCall(MatMergeEntries_Internal(mat, j1, j2, rowMid1, rowEnd1, rowMid2, rowEnd2, Bjmap1, Bjmap2, Bimap1, Bimap2, Bi, Bj));
6608 
6609   /* Expand Ajmap1/Bjmap1 so that they are indexed by the nonzeros in A/B, since */
6610   /* most nonzeros in A/B are expected to have local contributing entries        */
6611   PetscInt    Annz = Ai[m];
6612   PetscInt    Bnnz = Bi[m];
6613   PetscCount *Ajmap1_new, *Bjmap1_new;
6614 
6615   PetscCall(PetscMalloc1(Annz + 1, &Ajmap1_new));
6616   PetscCall(PetscMalloc1(Bnnz + 1, &Bjmap1_new));
6617 
6618   PetscCall(ExpandJmap_Internal(Annz1, Annz, Aimap1, Ajmap1, Ajmap1_new));
6619   PetscCall(ExpandJmap_Internal(Bnnz1, Bnnz, Bimap1, Bjmap1, Bjmap1_new));
6620 
6621   PetscCall(PetscFree(Aimap1));
6622   PetscCall(PetscFree(Ajmap1));
6623   PetscCall(PetscFree(Bimap1));
6624   PetscCall(PetscFree(Bjmap1));
6625   PetscCall(PetscFree3(rowBegin1, rowMid1, rowEnd1));
6626   PetscCall(PetscFree3(rowBegin2, rowMid2, rowEnd2));
6627   PetscCall(PetscFree(perm1));
6628   PetscCall(PetscFree3(i2, j2, perm2));
6629 
6630   Ajmap1 = Ajmap1_new;
6631   Bjmap1 = Bjmap1_new;
6632 
6633   /* Reallocate Aj, Bj once we know actual numbers of unique nonzeros in A and B */
6634   if (Annz < Annz1 + Annz2) {
6635     PetscInt *Aj_new;
6636     PetscCall(PetscMalloc1(Annz, &Aj_new));
6637     PetscCall(PetscArraycpy(Aj_new, Aj, Annz));
6638     PetscCall(PetscFree(Aj));
6639     Aj = Aj_new;
6640   }
6641 
6642   if (Bnnz < Bnnz1 + Bnnz2) {
6643     PetscInt *Bj_new;
6644     PetscCall(PetscMalloc1(Bnnz, &Bj_new));
6645     PetscCall(PetscArraycpy(Bj_new, Bj, Bnnz));
6646     PetscCall(PetscFree(Bj));
6647     Bj = Bj_new;
6648   }
6649 
6650   /* Create new submatrices for on-process and off-process coupling                  */
6651   PetscScalar     *Aa, *Ba;
6652   MatType          rtype;
6653   Mat_SeqAIJ      *a, *b;
6654   PetscObjectState state;
6655   PetscCall(PetscCalloc1(Annz, &Aa)); /* Zero matrix on device */
6656   PetscCall(PetscCalloc1(Bnnz, &Ba));
6657   /* make Aj[] local, i.e., based off the start column of the diagonal portion */
6658   if (cstart) {
6659     for (k = 0; k < Annz; k++) Aj[k] -= cstart;
6660   }
6661   PetscCall(MatDestroy(&mpiaij->A));
6662   PetscCall(MatDestroy(&mpiaij->B));
6663   PetscCall(MatGetRootType_Private(mat, &rtype));
6664   PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, Ai, Aj, Aa, &mpiaij->A));
6665   PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, mat->cmap->N, Bi, Bj, Ba, &mpiaij->B));
6666   PetscCall(MatSetUpMultiply_MPIAIJ(mat));
6667   mat->was_assembled = PETSC_TRUE; // was_assembled in effect means the Mvctx is built; doing so avoids redundant MatSetUpMultiply_MPIAIJ
6668   state              = mpiaij->A->nonzerostate + mpiaij->B->nonzerostate;
6669   PetscCall(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
6670 
6671   a               = (Mat_SeqAIJ *)mpiaij->A->data;
6672   b               = (Mat_SeqAIJ *)mpiaij->B->data;
6673   a->singlemalloc = b->singlemalloc = PETSC_FALSE; /* Let newmat own Ai,Aj,Aa,Bi,Bj,Ba */
6674   a->free_a = b->free_a = PETSC_TRUE;
6675   a->free_ij = b->free_ij = PETSC_TRUE;
6676 
6677   /* conversion must happen AFTER multiply setup */
6678   PetscCall(MatConvert(mpiaij->A, rtype, MAT_INPLACE_MATRIX, &mpiaij->A));
6679   PetscCall(MatConvert(mpiaij->B, rtype, MAT_INPLACE_MATRIX, &mpiaij->B));
6680   PetscCall(VecDestroy(&mpiaij->lvec));
6681   PetscCall(MatCreateVecs(mpiaij->B, &mpiaij->lvec, NULL));
6682 
6683   // Put the COO struct in a container and then attach that to the matrix
6684   PetscCall(PetscMalloc1(1, &coo));
6685   coo->n       = coo_n;
6686   coo->sf      = sf2;
6687   coo->sendlen = nleaves;
6688   coo->recvlen = nroots;
6689   coo->Annz    = Annz;
6690   coo->Bnnz    = Bnnz;
6691   coo->Annz2   = Annz2;
6692   coo->Bnnz2   = Bnnz2;
6693   coo->Atot1   = Atot1;
6694   coo->Atot2   = Atot2;
6695   coo->Btot1   = Btot1;
6696   coo->Btot2   = Btot2;
6697   coo->Ajmap1  = Ajmap1;
6698   coo->Aperm1  = Aperm1;
6699   coo->Bjmap1  = Bjmap1;
6700   coo->Bperm1  = Bperm1;
6701   coo->Aimap2  = Aimap2;
6702   coo->Ajmap2  = Ajmap2;
6703   coo->Aperm2  = Aperm2;
6704   coo->Bimap2  = Bimap2;
6705   coo->Bjmap2  = Bjmap2;
6706   coo->Bperm2  = Bperm2;
6707   coo->Cperm1  = Cperm1;
6708   // Allocate in preallocation. If not used, it has zero cost on host
6709   PetscCall(PetscMalloc2(coo->sendlen, &coo->sendbuf, coo->recvlen, &coo->recvbuf));
6710   PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
6711   PetscCall(PetscContainerSetPointer(container, coo));
6712   PetscCall(PetscContainerSetUserDestroy(container, MatCOOStructDestroy_MPIAIJ));
6713   PetscCall(PetscObjectCompose((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject)container));
6714   PetscCall(PetscContainerDestroy(&container));
6715   PetscFunctionReturn(PETSC_SUCCESS);
6716 }
6717 
6718 static PetscErrorCode MatSetValuesCOO_MPIAIJ(Mat mat, const PetscScalar v[], InsertMode imode)
6719 {
6720   Mat_MPIAIJ          *mpiaij = (Mat_MPIAIJ *)mat->data;
6721   Mat                  A = mpiaij->A, B = mpiaij->B;
6722   PetscScalar         *Aa, *Ba;
6723   PetscScalar         *sendbuf, *recvbuf;
6724   const PetscCount    *Ajmap1, *Ajmap2, *Aimap2;
6725   const PetscCount    *Bjmap1, *Bjmap2, *Bimap2;
6726   const PetscCount    *Aperm1, *Aperm2, *Bperm1, *Bperm2;
6727   const PetscCount    *Cperm1;
6728   PetscContainer       container;
6729   MatCOOStruct_MPIAIJ *coo;
6730 
6731   PetscFunctionBegin;
6732   PetscCall(PetscObjectQuery((PetscObject)mat, "__PETSc_MatCOOStruct_Host", (PetscObject *)&container));
6733   PetscCheck(container, PetscObjectComm((PetscObject)mat), PETSC_ERR_PLIB, "MatCOOStruct not found on this matrix");
6734   PetscCall(PetscContainerGetPointer(container, (void **)&coo));
6735   sendbuf = coo->sendbuf;
6736   recvbuf = coo->recvbuf;
6737   Ajmap1  = coo->Ajmap1;
6738   Ajmap2  = coo->Ajmap2;
6739   Aimap2  = coo->Aimap2;
6740   Bjmap1  = coo->Bjmap1;
6741   Bjmap2  = coo->Bjmap2;
6742   Bimap2  = coo->Bimap2;
6743   Aperm1  = coo->Aperm1;
6744   Aperm2  = coo->Aperm2;
6745   Bperm1  = coo->Bperm1;
6746   Bperm2  = coo->Bperm2;
6747   Cperm1  = coo->Cperm1;
6748 
6749   PetscCall(MatSeqAIJGetArray(A, &Aa)); /* Might read and write matrix values */
6750   PetscCall(MatSeqAIJGetArray(B, &Ba));
6751 
6752   /* Pack entries to be sent to remote */
6753   for (PetscCount i = 0; i < coo->sendlen; i++) sendbuf[i] = v[Cperm1[i]];
6754 
6755   /* Send remote entries to their owner and overlap the communication with local computation */
6756   PetscCall(PetscSFReduceWithMemTypeBegin(coo->sf, MPIU_SCALAR, PETSC_MEMTYPE_HOST, sendbuf, PETSC_MEMTYPE_HOST, recvbuf, MPI_REPLACE));
6757   /* Add local entries to A and B */
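       /* For example (made-up maps): if Ajmap1 = {0, 2, 3} and Aperm1 = {5, 9, 0}, the first nonzero of
          A receives v[5] + v[9] and the second receives v[0] */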
6758   for (PetscCount i = 0; i < coo->Annz; i++) { /* All nonzeros in A are either zero'ed or added with a value (i.e., initialized) */
6759     PetscScalar sum = 0.0;                     /* Do partial summation first to improve numerical stability */
6760     for (PetscCount k = Ajmap1[i]; k < Ajmap1[i + 1]; k++) sum += v[Aperm1[k]];
6761     Aa[i] = (imode == INSERT_VALUES ? 0.0 : Aa[i]) + sum;
6762   }
6763   for (PetscCount i = 0; i < coo->Bnnz; i++) {
6764     PetscScalar sum = 0.0;
6765     for (PetscCount k = Bjmap1[i]; k < Bjmap1[i + 1]; k++) sum += v[Bperm1[k]];
6766     Ba[i] = (imode == INSERT_VALUES ? 0.0 : Ba[i]) + sum;
6767   }
6768   PetscCall(PetscSFReduceEnd(coo->sf, MPIU_SCALAR, sendbuf, recvbuf, MPI_REPLACE));
6769 
6770   /* Add received remote entries to A and B */
6771   for (PetscCount i = 0; i < coo->Annz2; i++) {
6772     for (PetscCount k = Ajmap2[i]; k < Ajmap2[i + 1]; k++) Aa[Aimap2[i]] += recvbuf[Aperm2[k]];
6773   }
6774   for (PetscCount i = 0; i < coo->Bnnz2; i++) {
6775     for (PetscCount k = Bjmap2[i]; k < Bjmap2[i + 1]; k++) Ba[Bimap2[i]] += recvbuf[Bperm2[k]];
6776   }
6777   PetscCall(MatSeqAIJRestoreArray(A, &Aa));
6778   PetscCall(MatSeqAIJRestoreArray(B, &Ba));
6779   PetscFunctionReturn(PETSC_SUCCESS);
6780 }
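
     /* A minimal usage sketch of the COO assembly path implemented above, through the public API
        MatSetPreallocationCOO()/MatSetValuesCOO(). The sizes and entries below are made up for
        illustration; repeated (i,j) pairs are summed and negative indices are ignored, as handled by
        MatSetPreallocationCOO_MPIAIJ():

          Mat         A;
          PetscInt    coo_i[] = {0, 0, 1};        // global row indices, repeats allowed
          PetscInt    coo_j[] = {0, 1, 1};        // global column indices
          PetscScalar coo_v[] = {2.0, -1.0, 3.0}; // values matching coo_i[]/coo_j[]

          PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
          PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 2, 2));
          PetscCall(MatSetType(A, MATMPIAIJ));
          PetscCall(MatSetPreallocationCOO(A, 3, coo_i, coo_j)); // builds the maps and the SF used above
          PetscCall(MatSetValuesCOO(A, coo_v, ADD_VALUES));      // can be called repeatedly with new values
     */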
6781 
6782 /*MC
6783    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
6784 
6785    Options Database Keys:
6786 . -mat_type mpiaij - sets the matrix type to `MATMPIAIJ` during a call to `MatSetFromOptions()`
6787 
6788    Level: beginner
6789 
6790    Notes:
6791    `MatSetValues()` may be called for this matrix type with a `NULL` argument for the numerical values;
6792     in this case the values associated with the rows and columns one passes in are set to zero
6793     in the matrix
6794 
6795     `MatSetOption`(,`MAT_STRUCTURE_ONLY`,`PETSC_TRUE`) may be called for this matrix type. In this case no
6796     space is allocated for the nonzero entries and any entries passed with `MatSetValues()` are ignored
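
        Example Usage (a minimal sketch; the sizes and preallocation counts are illustrative only):
     .vb
        Mat A;
        MatCreate(PETSC_COMM_WORLD, &A);
        MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100);
        MatSetType(A, MATMPIAIJ);
        MatMPIAIJSetPreallocation(A, 5, NULL, 2, NULL); // preallocate 5 nonzeros per row in the diagonal block, 2 in the off-diagonal block
        // ... MatSetValues(A, ...) on locally owned rows ...
        MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
        MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
     .ve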
6797 
6798 .seealso: [](ch_matrices), `Mat`, `MATSEQAIJ`, `MATAIJ`, `MatCreateAIJ()`
6799 M*/
6800 PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
6801 {
6802   Mat_MPIAIJ *b;
6803   PetscMPIInt size;
6804 
6805   PetscFunctionBegin;
6806   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
6807 
6808   PetscCall(PetscNew(&b));
6809   B->data       = (void *)b;
6810   B->ops[0]     = MatOps_Values;
6811   B->assembled  = PETSC_FALSE;
6812   B->insertmode = NOT_SET_VALUES;
6813   b->size       = size;
6814 
6815   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
6816 
6817   /* build cache for off array entries formed */
6818   PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));
6819 
6820   b->donotstash  = PETSC_FALSE;
6821   b->colmap      = NULL;
6822   b->garray      = NULL;
6823   b->roworiented = PETSC_TRUE;
6824 
6825   /* stuff used for matrix vector multiply */
6826   b->lvec  = NULL;
6827   b->Mvctx = NULL;
6828 
6829   /* stuff for MatGetRow() */
6830   b->rowindices   = NULL;
6831   b->rowvalues    = NULL;
6832   b->getrowactive = PETSC_FALSE;
6833 
6834   /* flexible pointer used in CUSPARSE classes */
6835   b->spptr = NULL;
6836 
6837   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetUseScalableIncreaseOverlap_C", MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ));
6838   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPIAIJ));
6839   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPIAIJ));
6840   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPIAIJ));
6841   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJ));
6842   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatResetPreallocation_C", MatResetPreallocation_MPIAIJ));
6843   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocationCSR_C", MatMPIAIJSetPreallocationCSR_MPIAIJ));
6844   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPIAIJ));
6845   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijperm_C", MatConvert_MPIAIJ_MPIAIJPERM));
6846   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijsell_C", MatConvert_MPIAIJ_MPIAIJSELL));
6847 #if defined(PETSC_HAVE_CUDA)
6848   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcusparse_C", MatConvert_MPIAIJ_MPIAIJCUSPARSE));
6849 #endif
6850 #if defined(PETSC_HAVE_HIP)
6851   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijhipsparse_C", MatConvert_MPIAIJ_MPIAIJHIPSPARSE));
6852 #endif
6853 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
6854   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijkokkos_C", MatConvert_MPIAIJ_MPIAIJKokkos));
6855 #endif
6856 #if defined(PETSC_HAVE_MKL_SPARSE)
6857   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijmkl_C", MatConvert_MPIAIJ_MPIAIJMKL));
6858 #endif
6859   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpiaijcrl_C", MatConvert_MPIAIJ_MPIAIJCRL));
6860   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpibaij_C", MatConvert_MPIAIJ_MPIBAIJ));
6861   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisbaij_C", MatConvert_MPIAIJ_MPISBAIJ));
6862   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpidense_C", MatConvert_MPIAIJ_MPIDense));
6863 #if defined(PETSC_HAVE_ELEMENTAL)
6864   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_elemental_C", MatConvert_MPIAIJ_Elemental));
6865 #endif
6866 #if defined(PETSC_HAVE_SCALAPACK)
6867   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_scalapack_C", MatConvert_AIJ_ScaLAPACK));
6868 #endif
6869   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_is_C", MatConvert_XAIJ_IS));
6870   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_mpisell_C", MatConvert_MPIAIJ_MPISELL));
6871 #if defined(PETSC_HAVE_HYPRE)
6872   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpiaij_hypre_C", MatConvert_AIJ_HYPRE));
6873   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_transpose_mpiaij_mpiaij_C", MatProductSetFromOptions_Transpose_AIJ_AIJ));
6874 #endif
6875   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_is_mpiaij_C", MatProductSetFromOptions_IS_XAIJ));
6876   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatProductSetFromOptions_mpiaij_mpiaij_C", MatProductSetFromOptions_MPIAIJ));
6877   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_MPIAIJ));
6878   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatSetValuesCOO_C", MatSetValuesCOO_MPIAIJ));
6879   PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJ));
6880   PetscFunctionReturn(PETSC_SUCCESS);
6881 }
6882 
6883 /*@C
6884   MatCreateMPIAIJWithSplitArrays - creates a `MATMPIAIJ` matrix using arrays that contain the "diagonal"
6885   and "off-diagonal" part of the matrix in CSR format.
6886 
6887   Collective
6888 
6889   Input Parameters:
6890 + comm - MPI communicator
6891 . m    - number of local rows (Cannot be `PETSC_DECIDE`)
6892 . n    - This value should be the same as the local size used in creating the
6893        x vector for the matrix-vector product y = Ax (or `PETSC_DECIDE` to have it
6894        calculated if `N` is given). For square matrices `n` is almost always `m`.
6895 . M    - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
6896 . N    - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
6897 . i    - row indices for "diagonal" portion of matrix; that is i[0] = 0, i[row] = i[row-1] + number of elements in that row of the matrix
6898 . j    - column indices, which must be local, i.e., based off the start column of the diagonal portion
6899 . a    - matrix values
6900 . oi   - row indices for "off-diagonal" portion of matrix; that is oi[0] = 0, oi[row] = oi[row-1] + number of elements in that row of the matrix
6901 . oj   - column indices, which must be global, representing global columns in the `MATMPIAIJ` matrix
6902 - oa   - matrix values
6903 
6904   Output Parameter:
6905 . mat - the matrix
6906 
6907   Level: advanced
6908 
6909   Notes:
6910   The `i`, `j`, and `a` arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
6911   must free the arrays once the matrix has been destroyed and not before.
6912 
6913   The `i` and `j` indices are 0 based
6914 
6915   See `MatCreateAIJ()` for the definition of "diagonal" and "off-diagonal" portion of the matrix
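
       A small made-up illustration of the split format: on a rank owning rows 0-1 and columns 0-1 of a
       4 x 4 matrix whose locally owned entries are A(0,0)=1, A(0,1)=2, A(1,1)=3 and A(1,2)=4, one would pass
     .vb
       i  = {0, 2, 3};  j  = {0, 1, 1};  a  = {1.0, 2.0, 3.0};  // "diagonal" block, local column indices
       oi = {0, 0, 1};  oj = {2};        oa = {4.0};            // "off-diagonal" block, global column indices
     .ve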
6916 
6917   This sets local rows and cannot be used to set off-processor values.
6918 
6919   Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
6920   legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
6921   not easily support in-place reassembly. It is recommended to use `MatSetValues()` (or a variant thereof) because
6922   the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
6923   keep track of the underlying array. Use `MatSetOption`(A,`MAT_NO_OFF_PROC_ENTRIES`,`PETSC_TRUE`) to disable all
6924   communication if it is known that only local entries will be set.
6925 
6926 .seealso: [](ch_matrices), `Mat`, `MatCreate()`, `MatCreateSeqAIJ()`, `MatSetValues()`, `MatMPIAIJSetPreallocation()`, `MatMPIAIJSetPreallocationCSR()`,
6927           `MATMPIAIJ`, `MatCreateAIJ()`, `MatCreateMPIAIJWithArrays()`
6928 @*/
6929 PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt i[], PetscInt j[], PetscScalar a[], PetscInt oi[], PetscInt oj[], PetscScalar oa[], Mat *mat)
6930 {
6931   Mat_MPIAIJ *maij;
6932 
6933   PetscFunctionBegin;
6934   PetscCheck(m >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "local number of rows (m) cannot be PETSC_DECIDE, or negative");
6935   PetscCheck(i[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "i (row indices) must start with 0");
6936   PetscCheck(oi[0] == 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "oi (row indices) must start with 0");
6937   PetscCall(MatCreate(comm, mat));
6938   PetscCall(MatSetSizes(*mat, m, n, M, N));
6939   PetscCall(MatSetType(*mat, MATMPIAIJ));
6940   maij = (Mat_MPIAIJ *)(*mat)->data;
6941 
6942   (*mat)->preallocated = PETSC_TRUE;
6943 
6944   PetscCall(PetscLayoutSetUp((*mat)->rmap));
6945   PetscCall(PetscLayoutSetUp((*mat)->cmap));
6946 
6947   PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, n, i, j, a, &maij->A));
6948   PetscCall(MatCreateSeqAIJWithArrays(PETSC_COMM_SELF, m, (*mat)->cmap->N, oi, oj, oa, &maij->B));
6949 
6950   PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE));
6951   PetscCall(MatAssemblyBegin(*mat, MAT_FINAL_ASSEMBLY));
6952   PetscCall(MatAssemblyEnd(*mat, MAT_FINAL_ASSEMBLY));
6953   PetscCall(MatSetOption(*mat, MAT_NO_OFF_PROC_ENTRIES, PETSC_FALSE));
6954   PetscCall(MatSetOption(*mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE));
6955   PetscFunctionReturn(PETSC_SUCCESS);
6956 }
6957 
6958 typedef struct {
6959   Mat       *mp;    /* intermediate products */
6960   PetscBool *mptmp; /* is the intermediate product temporary ? */
6961   PetscInt   cp;    /* number of intermediate products */
6962 
6963   /* support for MatGetBrowsOfAoCols_MPIAIJ for P_oth */
6964   PetscInt    *startsj_s, *startsj_r;
6965   PetscScalar *bufa;
6966   Mat          P_oth;
6967 
6968   /* may take advantage of merging product->B */
6969   Mat Bloc; /* B-local by merging diag and off-diag */
6970 
6971   /* cusparse does not support splitting the symbolic and numeric phases.
6972      When api_user is true, we don't need to update the numerical values
6973      of the temporary storage */
6974   PetscBool reusesym;
6975 
6976   /* support for COO values insertion */
6977   PetscScalar *coo_v, *coo_w; /* store on-process and off-process COO scalars, and used as MPI recv/send buffers respectively */
6978   PetscInt   **own;           /* own[i] points to address of on-process COO indices for Mat mp[i] */
6979   PetscInt   **off;           /* off[i] points to address of off-process COO indices for Mat mp[i] */
6980   PetscBool    hasoffproc;    /* if true, have off-process values insertion (i.e. AtB or PtAP) */
6981   PetscSF      sf;            /* used for non-local values insertion and memory malloc */
6982   PetscMemType mtype;
6983 
6984   /* customization */
6985   PetscBool abmerge;
6986   PetscBool P_oth_bind;
6987 } MatMatMPIAIJBACKEND;
6988 
6989 static PetscErrorCode MatDestroy_MatMatMPIAIJBACKEND(void *data)
6990 {
6991   MatMatMPIAIJBACKEND *mmdata = (MatMatMPIAIJBACKEND *)data;
6992   PetscInt             i;
6993 
6994   PetscFunctionBegin;
6995   PetscCall(PetscFree2(mmdata->startsj_s, mmdata->startsj_r));
6996   PetscCall(PetscFree(mmdata->bufa));
6997   PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_v));
6998   PetscCall(PetscSFFree(mmdata->sf, mmdata->mtype, mmdata->coo_w));
6999   PetscCall(MatDestroy(&mmdata->P_oth));
7000   PetscCall(MatDestroy(&mmdata->Bloc));
7001   PetscCall(PetscSFDestroy(&mmdata->sf));
7002   for (i = 0; i < mmdata->cp; i++) PetscCall(MatDestroy(&mmdata->mp[i]));
7003   PetscCall(PetscFree2(mmdata->mp, mmdata->mptmp));
7004   PetscCall(PetscFree(mmdata->own[0]));
7005   PetscCall(PetscFree(mmdata->own));
7006   PetscCall(PetscFree(mmdata->off[0]));
7007   PetscCall(PetscFree(mmdata->off));
7008   PetscCall(PetscFree(mmdata));
7009   PetscFunctionReturn(PETSC_SUCCESS);
7010 }
7011 
7012 /* Copy selected n entries with indices in idx[] of A to v[].
7013    If idx is NULL, copy the whole data array of A to v[]
7014  */
7015 static PetscErrorCode MatSeqAIJCopySubArray(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[])
7016 {
7017   PetscErrorCode (*f)(Mat, PetscInt, const PetscInt[], PetscScalar[]);
7018 
7019   PetscFunctionBegin;
7020   PetscCall(PetscObjectQueryFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", &f));
7021   if (f) {
7022     PetscCall((*f)(A, n, idx, v));
7023   } else {
7024     const PetscScalar *vv;
7025 
7026     PetscCall(MatSeqAIJGetArrayRead(A, &vv));
7027     if (n && idx) {
7028       PetscScalar    *w  = v;
7029       const PetscInt *oi = idx;
7030       PetscInt        j;
7031 
7032       for (j = 0; j < n; j++) *w++ = vv[*oi++];
7033     } else {
7034       PetscCall(PetscArraycpy(v, vv, n));
7035     }
7036     PetscCall(MatSeqAIJRestoreArrayRead(A, &vv));
7037   }
7038   PetscFunctionReturn(PETSC_SUCCESS);
7039 }
7040 
7041 static PetscErrorCode MatProductNumeric_MPIAIJBACKEND(Mat C)
7042 {
7043   MatMatMPIAIJBACKEND *mmdata;
7044   PetscInt             i, n_d, n_o;
7045 
7046   PetscFunctionBegin;
7047   MatCheckProduct(C, 1);
7048   PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data empty");
7049   mmdata = (MatMatMPIAIJBACKEND *)C->product->data;
7050   if (!mmdata->reusesym) { /* update temporary matrices */
7051     if (mmdata->P_oth) PetscCall(MatGetBrowsOfAoCols_MPIAIJ(C->product->A, C->product->B, MAT_REUSE_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7052     if (mmdata->Bloc) PetscCall(MatMPIAIJGetLocalMatMerge(C->product->B, MAT_REUSE_MATRIX, NULL, &mmdata->Bloc));
7053   }
7054   mmdata->reusesym = PETSC_FALSE;
7055 
7056   for (i = 0; i < mmdata->cp; i++) {
7057     PetscCheck(mmdata->mp[i]->ops->productnumeric, PetscObjectComm((PetscObject)mmdata->mp[i]), PETSC_ERR_PLIB, "Missing numeric op for %s", MatProductTypes[mmdata->mp[i]->product->type]);
7058     PetscCall((*mmdata->mp[i]->ops->productnumeric)(mmdata->mp[i]));
7059   }
7060   for (i = 0, n_d = 0, n_o = 0; i < mmdata->cp; i++) {
7061     PetscInt noff = mmdata->off[i + 1] - mmdata->off[i];
7062 
7063     if (mmdata->mptmp[i]) continue;
7064     if (noff) {
7065       PetscInt nown = mmdata->own[i + 1] - mmdata->own[i];
7066 
7067       PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], noff, mmdata->off[i], mmdata->coo_w + n_o));
7068       PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], nown, mmdata->own[i], mmdata->coo_v + n_d));
7069       n_o += noff;
7070       n_d += nown;
7071     } else {
7072       Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mmdata->mp[i]->data;
7073 
7074       PetscCall(MatSeqAIJCopySubArray(mmdata->mp[i], mm->nz, NULL, mmdata->coo_v + n_d));
7075       n_d += mm->nz;
7076     }
7077   }
7078   if (mmdata->hasoffproc) { /* offprocess insertion */
7079     PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
7080     PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_SCALAR, mmdata->coo_w, mmdata->coo_v + n_d));
7081   }
7082   PetscCall(MatSetValuesCOO(C, mmdata->coo_v, INSERT_VALUES));
7083   PetscFunctionReturn(PETSC_SUCCESS);
7084 }
7085 
7086 /* Support for Pt * A, A * P, or Pt * A * P */
7087 #define MAX_NUMBER_INTERMEDIATE 4
7088 PetscErrorCode MatProductSymbolic_MPIAIJBACKEND(Mat C)
7089 {
7090   Mat_Product           *product = C->product;
7091   Mat                    A, P, mp[MAX_NUMBER_INTERMEDIATE]; /* A, P and a series of intermediate matrices */
7092   Mat_MPIAIJ            *a, *p;
7093   MatMatMPIAIJBACKEND   *mmdata;
7094   ISLocalToGlobalMapping P_oth_l2g = NULL;
7095   IS                     glob      = NULL;
7096   const char            *prefix;
7097   char                   pprefix[256];
7098   const PetscInt        *globidx, *P_oth_idx;
7099   PetscInt               i, j, cp, m, n, M, N, *coo_i, *coo_j;
7100   PetscCount             ncoo, ncoo_d, ncoo_o, ncoo_oown;
7101   PetscInt               cmapt[MAX_NUMBER_INTERMEDIATE], rmapt[MAX_NUMBER_INTERMEDIATE]; /* col/row map type for each Mat in mp[]. */
7102                                                                                          /* type-0: consecutive, start from 0; type-1: consecutive with */
7103                                                                                          /* a base offset; type-2: sparse with a local to global map table */
7104   const PetscInt *cmapa[MAX_NUMBER_INTERMEDIATE], *rmapa[MAX_NUMBER_INTERMEDIATE];       /* col/row local to global map array (table) for type-2 map type */
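       /* For example: a type-1 row map sends local row r of mp[i] to a consecutive global row of C at
          some base offset, while a type-2 row map sends it to global row rmapa[i][r]; cmapt[]/cmapa[]
          describe the columns of mp[i] in the same way */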
7105 
7106   MatProductType ptype;
7107   PetscBool      mptmp[MAX_NUMBER_INTERMEDIATE], hasoffproc = PETSC_FALSE, iscuda, iship, iskokk;
7108   PetscMPIInt    size;
7109 
7110   PetscFunctionBegin;
7111   MatCheckProduct(C, 1);
7112   PetscCheck(!product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Product data not empty");
7113   ptype = product->type;
7114   if (product->A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) {
7115     ptype                                          = MATPRODUCT_AB;
7116     product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE;
7117   }
7118   switch (ptype) {
7119   case MATPRODUCT_AB:
7120     A          = product->A;
7121     P          = product->B;
7122     m          = A->rmap->n;
7123     n          = P->cmap->n;
7124     M          = A->rmap->N;
7125     N          = P->cmap->N;
7126     hasoffproc = PETSC_FALSE; /* will not scatter mat product values to other processes */
7127     break;
7128   case MATPRODUCT_AtB:
7129     P          = product->A;
7130     A          = product->B;
7131     m          = P->cmap->n;
7132     n          = A->cmap->n;
7133     M          = P->cmap->N;
7134     N          = A->cmap->N;
7135     hasoffproc = PETSC_TRUE;
7136     break;
7137   case MATPRODUCT_PtAP:
7138     A          = product->A;
7139     P          = product->B;
7140     m          = P->cmap->n;
7141     n          = P->cmap->n;
7142     M          = P->cmap->N;
7143     N          = P->cmap->N;
7144     hasoffproc = PETSC_TRUE;
7145     break;
7146   default:
7147     SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
7148   }
7149   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)C), &size));
7150   if (size == 1) hasoffproc = PETSC_FALSE;
7151 
7152   /* defaults */
7153   for (i = 0; i < MAX_NUMBER_INTERMEDIATE; i++) {
7154     mp[i]    = NULL;
7155     mptmp[i] = PETSC_FALSE;
7156     rmapt[i] = -1;
7157     cmapt[i] = -1;
7158     rmapa[i] = NULL;
7159     cmapa[i] = NULL;
7160   }
7161 
7162   /* customization */
7163   PetscCall(PetscNew(&mmdata));
7164   mmdata->reusesym = product->api_user;
7165   if (ptype == MATPRODUCT_AB) {
7166     if (product->api_user) {
7167       PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
7168       PetscCall(PetscOptionsBool("-matmatmult_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
7169       PetscCall(PetscOptionsBool("-matmatmult_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7170       PetscOptionsEnd();
7171     } else {
7172       PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
7173       PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_mergeB", "Merge product->B local matrices", "MatMatMult", mmdata->abmerge, &mmdata->abmerge, NULL));
7174       PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7175       PetscOptionsEnd();
7176     }
7177   } else if (ptype == MATPRODUCT_PtAP) {
7178     if (product->api_user) {
7179       PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
7180       PetscCall(PetscOptionsBool("-matptap_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7181       PetscOptionsEnd();
7182     } else {
7183       PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
7184       PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_pothbind", "Bind P_oth to CPU", "MatBindToCPU", mmdata->P_oth_bind, &mmdata->P_oth_bind, NULL));
7185       PetscOptionsEnd();
7186     }
7187   }
7188   a = (Mat_MPIAIJ *)A->data;
7189   p = (Mat_MPIAIJ *)P->data;
7190   PetscCall(MatSetSizes(C, m, n, M, N));
7191   PetscCall(PetscLayoutSetUp(C->rmap));
7192   PetscCall(PetscLayoutSetUp(C->cmap));
7193   PetscCall(MatSetType(C, ((PetscObject)A)->type_name));
7194   PetscCall(MatGetOptionsPrefix(C, &prefix));
7195 
7196   cp = 0;
7197   switch (ptype) {
7198   case MATPRODUCT_AB: /* A * P */
7199     PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7200 
7201     /* A_diag * P_local (merged or not) */
7202     if (mmdata->abmerge) { /* P's diagonal and off-diag blocks are merged to one matrix, then multiplied by A_diag */
7203       /* P is product->B */
7204       PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7205       PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
7206       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7207       PetscCall(MatProductSetFill(mp[cp], product->fill));
7208       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7209       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7210       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7211       mp[cp]->product->api_user = product->api_user;
7212       PetscCall(MatProductSetFromOptions(mp[cp]));
7213       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7214       PetscCall(ISGetIndices(glob, &globidx));
7215       rmapt[cp] = 1;
7216       cmapt[cp] = 2;
7217       cmapa[cp] = globidx;
7218       mptmp[cp] = PETSC_FALSE;
7219       cp++;
7220     } else { /* A_diag * P_diag and A_diag * P_off */
7221       PetscCall(MatProductCreate(a->A, p->A, NULL, &mp[cp]));
7222       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7223       PetscCall(MatProductSetFill(mp[cp], product->fill));
7224       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7225       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7226       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7227       mp[cp]->product->api_user = product->api_user;
7228       PetscCall(MatProductSetFromOptions(mp[cp]));
7229       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7230       rmapt[cp] = 1;
7231       cmapt[cp] = 1;
7232       mptmp[cp] = PETSC_FALSE;
7233       cp++;
7234       PetscCall(MatProductCreate(a->A, p->B, NULL, &mp[cp]));
7235       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7236       PetscCall(MatProductSetFill(mp[cp], product->fill));
7237       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7238       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7239       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7240       mp[cp]->product->api_user = product->api_user;
7241       PetscCall(MatProductSetFromOptions(mp[cp]));
7242       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7243       rmapt[cp] = 1;
7244       cmapt[cp] = 2;
7245       cmapa[cp] = p->garray;
7246       mptmp[cp] = PETSC_FALSE;
7247       cp++;
7248     }
7249 
7250     /* A_off * P_other */
7251     if (mmdata->P_oth) {
7252       PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g)); /* make P_oth use local col ids */
7253       PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
7254       PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)(a->B))->type_name));
7255       PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
7256       PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
7257       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7258       PetscCall(MatProductSetFill(mp[cp], product->fill));
7259       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7260       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7261       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7262       mp[cp]->product->api_user = product->api_user;
7263       PetscCall(MatProductSetFromOptions(mp[cp]));
7264       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7265       rmapt[cp] = 1;
7266       cmapt[cp] = 2;
7267       cmapa[cp] = P_oth_idx;
7268       mptmp[cp] = PETSC_FALSE;
7269       cp++;
7270     }
7271     break;
7272 
7273   case MATPRODUCT_AtB: /* (P^t * A): P_diag * A_loc + P_off * A_loc */
7274     /* A is product->B */
7275     PetscCall(MatMPIAIJGetLocalMatMerge(A, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7276     if (A == P) { /* when A==P, we can take advantage of the already merged mmdata->Bloc */
7277       PetscCall(MatProductCreate(mmdata->Bloc, mmdata->Bloc, NULL, &mp[cp]));
7278       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7279       PetscCall(MatProductSetFill(mp[cp], product->fill));
7280       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7281       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7282       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7283       mp[cp]->product->api_user = product->api_user;
7284       PetscCall(MatProductSetFromOptions(mp[cp]));
7285       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7286       PetscCall(ISGetIndices(glob, &globidx));
7287       rmapt[cp] = 2;
7288       rmapa[cp] = globidx;
7289       cmapt[cp] = 2;
7290       cmapa[cp] = globidx;
7291       mptmp[cp] = PETSC_FALSE;
7292       cp++;
7293     } else {
7294       PetscCall(MatProductCreate(p->A, mmdata->Bloc, NULL, &mp[cp]));
7295       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7296       PetscCall(MatProductSetFill(mp[cp], product->fill));
7297       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7298       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7299       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7300       mp[cp]->product->api_user = product->api_user;
7301       PetscCall(MatProductSetFromOptions(mp[cp]));
7302       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7303       PetscCall(ISGetIndices(glob, &globidx));
7304       rmapt[cp] = 1;
7305       cmapt[cp] = 2;
7306       cmapa[cp] = globidx;
7307       mptmp[cp] = PETSC_FALSE;
7308       cp++;
7309       PetscCall(MatProductCreate(p->B, mmdata->Bloc, NULL, &mp[cp]));
7310       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7311       PetscCall(MatProductSetFill(mp[cp], product->fill));
7312       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7313       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7314       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7315       mp[cp]->product->api_user = product->api_user;
7316       PetscCall(MatProductSetFromOptions(mp[cp]));
7317       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7318       rmapt[cp] = 2;
7319       rmapa[cp] = p->garray;
7320       cmapt[cp] = 2;
7321       cmapa[cp] = globidx;
7322       mptmp[cp] = PETSC_FALSE;
7323       cp++;
7324     }
7325     break;
7326   case MATPRODUCT_PtAP:
7327     PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &mmdata->startsj_s, &mmdata->startsj_r, &mmdata->bufa, &mmdata->P_oth));
7328     /* P is product->B */
7329     PetscCall(MatMPIAIJGetLocalMatMerge(P, MAT_INITIAL_MATRIX, &glob, &mmdata->Bloc));
7330     PetscCall(MatProductCreate(a->A, mmdata->Bloc, NULL, &mp[cp]));
7331     PetscCall(MatProductSetType(mp[cp], MATPRODUCT_PtAP));
7332     PetscCall(MatProductSetFill(mp[cp], product->fill));
7333     PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7334     PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7335     PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7336     mp[cp]->product->api_user = product->api_user;
7337     PetscCall(MatProductSetFromOptions(mp[cp]));
7338     PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7339     PetscCall(ISGetIndices(glob, &globidx));
7340     rmapt[cp] = 2;
7341     rmapa[cp] = globidx;
7342     cmapt[cp] = 2;
7343     cmapa[cp] = globidx;
7344     mptmp[cp] = PETSC_FALSE;
7345     cp++;
7346     if (mmdata->P_oth) {
7347       PetscCall(MatSeqAIJCompactOutExtraColumns_SeqAIJ(mmdata->P_oth, &P_oth_l2g));
7348       PetscCall(ISLocalToGlobalMappingGetIndices(P_oth_l2g, &P_oth_idx));
7349       PetscCall(MatSetType(mmdata->P_oth, ((PetscObject)(a->B))->type_name));
7350       PetscCall(MatBindToCPU(mmdata->P_oth, mmdata->P_oth_bind));
7351       PetscCall(MatProductCreate(a->B, mmdata->P_oth, NULL, &mp[cp]));
7352       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AB));
7353       PetscCall(MatProductSetFill(mp[cp], product->fill));
7354       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7355       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7356       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7357       mp[cp]->product->api_user = product->api_user;
7358       PetscCall(MatProductSetFromOptions(mp[cp]));
7359       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7360       mptmp[cp] = PETSC_TRUE;
7361       cp++;
7362       PetscCall(MatProductCreate(mmdata->Bloc, mp[1], NULL, &mp[cp]));
7363       PetscCall(MatProductSetType(mp[cp], MATPRODUCT_AtB));
7364       PetscCall(MatProductSetFill(mp[cp], product->fill));
7365       PetscCall(PetscSNPrintf(pprefix, sizeof(pprefix), "backend_p%" PetscInt_FMT "_", cp));
7366       PetscCall(MatSetOptionsPrefix(mp[cp], prefix));
7367       PetscCall(MatAppendOptionsPrefix(mp[cp], pprefix));
7368       mp[cp]->product->api_user = product->api_user;
7369       PetscCall(MatProductSetFromOptions(mp[cp]));
7370       PetscCall((*mp[cp]->ops->productsymbolic)(mp[cp]));
7371       rmapt[cp] = 2;
7372       rmapa[cp] = globidx;
7373       cmapt[cp] = 2;
7374       cmapa[cp] = P_oth_idx;
7375       mptmp[cp] = PETSC_FALSE;
7376       cp++;
7377     }
7378     break;
7379   default:
7380     SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Not for product type %s", MatProductTypes[ptype]);
7381   }
7382   /* sanity check */
7383   if (size > 1)
7384     for (i = 0; i < cp; i++) PetscCheck(rmapt[i] != 2 || hasoffproc, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected offproc map type for product %" PetscInt_FMT, i);
7385 
7386   PetscCall(PetscMalloc2(cp, &mmdata->mp, cp, &mmdata->mptmp));
7387   for (i = 0; i < cp; i++) {
7388     mmdata->mp[i]    = mp[i];
7389     mmdata->mptmp[i] = mptmp[i];
7390   }
7391   mmdata->cp             = cp;
7392   C->product->data       = mmdata;
7393   C->product->destroy    = MatDestroy_MatMatMPIAIJBACKEND;
7394   C->ops->productnumeric = MatProductNumeric_MPIAIJBACKEND;
7395 
7396   /* memory type */
7397   mmdata->mtype = PETSC_MEMTYPE_HOST;
7398   PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iscuda, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, ""));
7399   PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iship, MATSEQAIJHIPSPARSE, MATMPIAIJHIPSPARSE, ""));
7400   PetscCall(PetscObjectTypeCompareAny((PetscObject)C, &iskokk, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, ""));
7401   if (iscuda) mmdata->mtype = PETSC_MEMTYPE_CUDA;
7402   else if (iship) mmdata->mtype = PETSC_MEMTYPE_HIP;
7403   else if (iskokk) mmdata->mtype = PETSC_MEMTYPE_KOKKOS;
7404 
7405   /* prepare coo coordinates for values insertion */
7406 
7407   /* count total nonzeros of those intermediate seqaij Mats
7408     ncoo_d:    # of nonzeros of matrices that do not have offproc entries
7409     ncoo_o:    # of nonzeros (of matrices that might have offproc entries) that will be inserted to remote procs
7410     ncoo_oown: # of nonzeros (of matrices that might have offproc entries) that will be inserted locally
7411   */
7412   for (cp = 0, ncoo_d = 0, ncoo_o = 0, ncoo_oown = 0; cp < mmdata->cp; cp++) {
7413     Mat_SeqAIJ *mm = (Mat_SeqAIJ *)mp[cp]->data;
7414     if (mptmp[cp]) continue;
7415     if (rmapt[cp] == 2 && hasoffproc) { /* the rows need to be scattered to all processes (might include self) */
7416       const PetscInt *rmap = rmapa[cp];
7417       const PetscInt  mr   = mp[cp]->rmap->n;
7418       const PetscInt  rs   = C->rmap->rstart;
7419       const PetscInt  re   = C->rmap->rend;
7420       const PetscInt *ii   = mm->i;
7421       for (i = 0; i < mr; i++) {
7422         const PetscInt gr = rmap[i];
7423         const PetscInt nz = ii[i + 1] - ii[i];
7424         if (gr < rs || gr >= re) ncoo_o += nz; /* this row is offproc */
7425         else ncoo_oown += nz;                  /* this row is local */
7426       }
7427     } else ncoo_d += mm->nz;
7428   }
7429 
7430   /*
7431     ncoo: total number of nonzeros (including those inserted by remote procs) belonging to this proc
7432 
7433     ncoo = ncoo_d + ncoo_oown + ncoo2, where ncoo2 is the number of nonzeros inserted to me by other procs.
7434 
7435     off[0] points to a big index array, which is shared by off[1,2,...]. Similarly, for own[0].
7436 
7437     off[p]: points to the segment for matrix mp[p], storing the locations of nonzeros that mp[p] will insert on other processes
7438     own[p]: points to the segment for matrix mp[p], storing the locations of nonzeros that mp[p] will insert locally
7439     so off[p+1]-off[p] is the number of nonzeros that mp[p] will send to other processes.
7440 
7441     coo_i/j/v[]: [ncoo] row/col/val of nonzeros belonging to this proc.
7442     Ex. coo_i[]: the beginning part (of size ncoo_d + ncoo_oown) stores i of local nonzeros, and the remaining part stores i of nonzeros I will receive.
7443   */
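  /*
    Illustrative sketch (added for exposition): suppose mp[0] will insert 3 nonzeros on other processes and
    2 locally, while mp[1] will insert 0 and 4. Then, with base pointers b_off = off[0] and b_own = own[0],

      off[] = { b_off, b_off+3, b_off+3 }
      own[] = { b_own, b_own+2, b_own+6 }

    i.e. off[]/own[] act as CSR-like row pointers into two shared index buffers.
  */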
7444   PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->off)); /* +1 to make a csr-like data structure */
7445   PetscCall(PetscCalloc1(mmdata->cp + 1, &mmdata->own));
7446 
7447   /* gather (i,j) of nonzeros inserted by remote procs */
7448   if (hasoffproc) {
7449     PetscSF  msf;
7450     PetscInt ncoo2, *coo_i2, *coo_j2;
7451 
7452     PetscCall(PetscMalloc1(ncoo_o, &mmdata->off[0]));
7453     PetscCall(PetscMalloc1(ncoo_oown, &mmdata->own[0]));
7454     PetscCall(PetscMalloc2(ncoo_o, &coo_i, ncoo_o, &coo_j)); /* to collect (i,j) of entries to be sent to others */
7455 
7456     for (cp = 0, ncoo_o = 0; cp < mmdata->cp; cp++) {
7457       Mat_SeqAIJ *mm     = (Mat_SeqAIJ *)mp[cp]->data;
7458       PetscInt   *idxoff = mmdata->off[cp];
7459       PetscInt   *idxown = mmdata->own[cp];
7460       if (!mptmp[cp] && rmapt[cp] == 2) { /* row map is sparse */
7461         const PetscInt *rmap = rmapa[cp];
7462         const PetscInt *cmap = cmapa[cp];
7463         const PetscInt *ii   = mm->i;
7464         PetscInt       *coi  = coo_i + ncoo_o;
7465         PetscInt       *coj  = coo_j + ncoo_o;
7466         const PetscInt  mr   = mp[cp]->rmap->n;
7467         const PetscInt  rs   = C->rmap->rstart;
7468         const PetscInt  re   = C->rmap->rend;
7469         const PetscInt  cs   = C->cmap->rstart;
7470         for (i = 0; i < mr; i++) {
7471           const PetscInt *jj = mm->j + ii[i];
7472           const PetscInt  gr = rmap[i];
7473           const PetscInt  nz = ii[i + 1] - ii[i];
7474           if (gr < rs || gr >= re) { /* this is an offproc row */
7475             for (j = ii[i]; j < ii[i + 1]; j++) {
7476               *coi++    = gr;
7477               *idxoff++ = j;
7478             }
7479             if (!cmapt[cp]) { /* already global */
7480               for (j = 0; j < nz; j++) *coj++ = jj[j];
7481             } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7482               for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7483             } else { /* offdiag */
7484               for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7485             }
7486             ncoo_o += nz;
7487           } else { /* this is a local row */
7488             for (j = ii[i]; j < ii[i + 1]; j++) *idxown++ = j;
7489           }
7490         }
7491       }
7492       mmdata->off[cp + 1] = idxoff;
7493       mmdata->own[cp + 1] = idxown;
7494     }
7495 
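    /*
      Note (added for exposition): the star forest created below has one leaf per offproc entry;
      PetscSFSetGraphLayout() maps leaf k to the process owning global row coo_i[k] according to C's row
      layout. The root count of the associated multi-SF then yields ncoo2, the number of entries this
      process will receive from others.
    */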
7496     PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
7497     PetscCall(PetscSFSetGraphLayout(mmdata->sf, C->rmap, ncoo_o /*nleaves*/, NULL /*ilocal*/, PETSC_OWN_POINTER, coo_i));
7498     PetscCall(PetscSFGetMultiSF(mmdata->sf, &msf));
7499     PetscCall(PetscSFGetGraph(msf, &ncoo2 /*nroots*/, NULL, NULL, NULL));
7500     ncoo = ncoo_d + ncoo_oown + ncoo2;
7501     PetscCall(PetscMalloc2(ncoo, &coo_i2, ncoo, &coo_j2));
7502     PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown)); /* put (i,j) of remote nonzeros at back */
7503     PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_i, coo_i2 + ncoo_d + ncoo_oown));
7504     PetscCall(PetscSFGatherBegin(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
7505     PetscCall(PetscSFGatherEnd(mmdata->sf, MPIU_INT, coo_j, coo_j2 + ncoo_d + ncoo_oown));
7506     PetscCall(PetscFree2(coo_i, coo_j));
7507     /* allocate MPI send buffer to collect nonzero values to be sent to remote procs */
7508     PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo_o * sizeof(PetscScalar), (void **)&mmdata->coo_w));
7509     coo_i = coo_i2;
7510     coo_j = coo_j2;
7511   } else { /* no offproc values insertion */
7512     ncoo = ncoo_d;
7513     PetscCall(PetscMalloc2(ncoo, &coo_i, ncoo, &coo_j));
7514 
7515     PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)C), &mmdata->sf));
7516     PetscCall(PetscSFSetGraph(mmdata->sf, 0, 0, NULL, PETSC_OWN_POINTER, NULL, PETSC_OWN_POINTER));
7517     PetscCall(PetscSFSetUp(mmdata->sf));
7518   }
7519   mmdata->hasoffproc = hasoffproc;
7520 
7521   /* gather (i,j) of nonzeros inserted locally */
7522   for (cp = 0, ncoo_d = 0; cp < mmdata->cp; cp++) {
7523     Mat_SeqAIJ     *mm   = (Mat_SeqAIJ *)mp[cp]->data;
7524     PetscInt       *coi  = coo_i + ncoo_d;
7525     PetscInt       *coj  = coo_j + ncoo_d;
7526     const PetscInt *jj   = mm->j;
7527     const PetscInt *ii   = mm->i;
7528     const PetscInt *cmap = cmapa[cp];
7529     const PetscInt *rmap = rmapa[cp];
7530     const PetscInt  mr   = mp[cp]->rmap->n;
7531     const PetscInt  rs   = C->rmap->rstart;
7532     const PetscInt  re   = C->rmap->rend;
7533     const PetscInt  cs   = C->cmap->rstart;
7534 
7535     if (mptmp[cp]) continue;
7536     if (rmapt[cp] == 1) { /* consecutive rows */
7537       /* fill coo_i */
7538       for (i = 0; i < mr; i++) {
7539         const PetscInt gr = i + rs;
7540         for (j = ii[i]; j < ii[i + 1]; j++) coi[j] = gr;
7541       }
7542       /* fill coo_j */
7543       if (!cmapt[cp]) { /* type-0, already global */
7544         PetscCall(PetscArraycpy(coj, jj, mm->nz));
7545       } else if (cmapt[cp] == 1) {                        /* type-1, local to global for consecutive columns of C */
7546         for (j = 0; j < mm->nz; j++) coj[j] = jj[j] + cs; /* lid + col start */
7547       } else {                                            /* type-2, local to global for sparse columns */
7548         for (j = 0; j < mm->nz; j++) coj[j] = cmap[jj[j]];
7549       }
7550       ncoo_d += mm->nz;
7551     } else if (rmapt[cp] == 2) { /* sparse rows */
7552       for (i = 0; i < mr; i++) {
7553         const PetscInt *jj = mm->j + ii[i];
7554         const PetscInt  gr = rmap[i];
7555         const PetscInt  nz = ii[i + 1] - ii[i];
7556         if (gr >= rs && gr < re) { /* local rows */
7557           for (j = ii[i]; j < ii[i + 1]; j++) *coi++ = gr;
7558           if (!cmapt[cp]) { /* type-0, already global */
7559             for (j = 0; j < nz; j++) *coj++ = jj[j];
7560           } else if (cmapt[cp] == 1) { /* local to global for owned columns of C */
7561             for (j = 0; j < nz; j++) *coj++ = jj[j] + cs;
7562           } else { /* type-2, local to global for sparse columns */
7563             for (j = 0; j < nz; j++) *coj++ = cmap[jj[j]];
7564           }
7565           ncoo_d += nz;
7566         }
7567       }
7568     }
7569   }
7570   if (glob) PetscCall(ISRestoreIndices(glob, &globidx));
7571   PetscCall(ISDestroy(&glob));
7572   if (P_oth_l2g) PetscCall(ISLocalToGlobalMappingRestoreIndices(P_oth_l2g, &P_oth_idx));
7573   PetscCall(ISLocalToGlobalMappingDestroy(&P_oth_l2g));
7574   /* allocate an array to store all nonzeros (inserted locally or remotely) belonging to this proc */
7575   PetscCall(PetscSFMalloc(mmdata->sf, mmdata->mtype, ncoo * sizeof(PetscScalar), (void **)&mmdata->coo_v));
7576 
7577   /* preallocate with COO data */
7578   PetscCall(MatSetPreallocationCOO(C, ncoo, coo_i, coo_j));
7579   PetscCall(PetscFree2(coo_i, coo_j));
7580   PetscFunctionReturn(PETSC_SUCCESS);
7581 }
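
/*
  Usage sketch (for exposition only; A and P denote hypothetical user matrices): the symbolic routine above
  is normally reached through the generic MatProduct interface, for example

    Mat C;
    PetscCall(MatProductCreate(A, P, NULL, &C));
    PetscCall(MatProductSetType(C, MATPRODUCT_PtAP));
    PetscCall(MatProductSetFill(C, PETSC_DEFAULT));
    PetscCall(MatProductSetFromOptions(C)); // may pick MatProductSymbolic_MPIAIJBACKEND
    PetscCall(MatProductSymbolic(C));       // builds the intermediate products and the COO preallocation
    PetscCall(MatProductNumeric(C));        // MatProductNumeric_MPIAIJBACKEND fills the values

  or, equivalently, through a convenience routine such as MatPtAP().
*/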
7582 
7583 PetscErrorCode MatProductSetFromOptions_MPIAIJBACKEND(Mat mat)
7584 {
7585   Mat_Product *product = mat->product;
7586 #if defined(PETSC_HAVE_DEVICE)
7587   PetscBool match  = PETSC_FALSE;
7588   PetscBool usecpu = PETSC_FALSE;
7589 #else
7590   PetscBool match = PETSC_TRUE;
7591 #endif
7592 
7593   PetscFunctionBegin;
7594   MatCheckProduct(mat, 1);
7595 #if defined(PETSC_HAVE_DEVICE)
7596   if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, ((PetscObject)product->A)->type_name, &match));
7597   if (match) { /* we can always fall back to the CPU if requested */
7598     switch (product->type) {
7599     case MATPRODUCT_AB:
7600       if (product->api_user) {
7601         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat");
7602         PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
7603         PetscOptionsEnd();
7604       } else {
7605         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat");
7606         PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL));
7607         PetscOptionsEnd();
7608       }
7609       break;
7610     case MATPRODUCT_AtB:
7611       if (product->api_user) {
7612         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat");
7613         PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
7614         PetscOptionsEnd();
7615       } else {
7616         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat");
7617         PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL));
7618         PetscOptionsEnd();
7619       }
7620       break;
7621     case MATPRODUCT_PtAP:
7622       if (product->api_user) {
7623         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat");
7624         PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
7625         PetscOptionsEnd();
7626       } else {
7627         PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat");
7628         PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL));
7629         PetscOptionsEnd();
7630       }
7631       break;
7632     default:
7633       break;
7634     }
7635     match = (PetscBool)!usecpu;
7636   }
7637 #endif
7638   if (match) {
7639     switch (product->type) {
7640     case MATPRODUCT_AB:
7641     case MATPRODUCT_AtB:
7642     case MATPRODUCT_PtAP:
7643       mat->ops->productsymbolic = MatProductSymbolic_MPIAIJBACKEND;
7644       break;
7645     default:
7646       break;
7647     }
7648   }
7649   /* fallback to MPIAIJ ops */
7650   if (!mat->ops->productsymbolic) PetscCall(MatProductSetFromOptions_MPIAIJ(mat));
7651   PetscFunctionReturn(PETSC_SUCCESS);
7652 }
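
/*
  Note (added for exposition): the CPU fallback can be requested at runtime with the options registered above,
  e.g. -matmatmult_backend_cpu, -mattransposematmult_backend_cpu, or -matptap_backend_cpu when using the
  MatMatMult()/MatTransposeMatMult()/MatPtAP() API, or -mat_product_algorithm_backend_cpu with the generic
  MatProduct interface; the standard MPIAIJ product implementations are then used instead.
*/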
7653 
7654 /*
7655    Produces a set of block column indices of the matrix row, one for each block represented in the original row
7656 
7657    n - the number of block indices in cc[]
7658    cc - the block indices (must be large enough to contain the indices)
7659 */
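/*
   Example (illustrative): with bs = 2 and a row whose (sorted) column indices are {0, 1, 4, 5, 7},
   the collapsed block indices are cc[] = {0, 2, 3} and *n = 3.
*/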
7660 static inline PetscErrorCode MatCollapseRow(Mat Amat, PetscInt row, PetscInt bs, PetscInt *n, PetscInt *cc)
7661 {
7662   PetscInt        cnt = -1, nidx, j;
7663   const PetscInt *idx;
7664 
7665   PetscFunctionBegin;
7666   PetscCall(MatGetRow(Amat, row, &nidx, &idx, NULL));
7667   if (nidx) {
7668     cnt     = 0;
7669     cc[cnt] = idx[0] / bs;
7670     for (j = 1; j < nidx; j++) {
7671       if (cc[cnt] < idx[j] / bs) cc[++cnt] = idx[j] / bs;
7672     }
7673   }
7674   PetscCall(MatRestoreRow(Amat, row, &nidx, &idx, NULL));
7675   *n = cnt + 1;
7676   PetscFunctionReturn(PETSC_SUCCESS);
7677 }
7678 
7679 /*
7680     Produces a set of block column indices of the matrix block row, one for each block represented in the original set of rows
7681 
7682     ncollapsed - the number of block indices
7683     collapsed - the block indices (must be large enough to contain the indices)
7684 */
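/*
    Example (illustrative): with bs = 2, if scalar rows start and start+1 collapse to block columns
    {0, 2} and {1, 2} respectively, the merged result is {0, 1, 2} and *ncollapsed = 3.
*/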
7685 static inline PetscErrorCode MatCollapseRows(Mat Amat, PetscInt start, PetscInt bs, PetscInt *w0, PetscInt *w1, PetscInt *w2, PetscInt *ncollapsed, PetscInt **collapsed)
7686 {
7687   PetscInt i, nprev, *cprev = w0, ncur = 0, *ccur = w1, *merged = w2, *cprevtmp;
7688 
7689   PetscFunctionBegin;
7690   PetscCall(MatCollapseRow(Amat, start, bs, &nprev, cprev));
7691   for (i = start + 1; i < start + bs; i++) {
7692     PetscCall(MatCollapseRow(Amat, i, bs, &ncur, ccur));
7693     PetscCall(PetscMergeIntArray(nprev, cprev, ncur, ccur, &nprev, &merged));
7694     cprevtmp = cprev;
7695     cprev    = merged;
7696     merged   = cprevtmp;
7697   }
7698   *ncollapsed = nprev;
7699   if (collapsed) *collapsed = cprev;
7700   PetscFunctionReturn(PETSC_SUCCESS);
7701 }
7702 
7703 /*
7704  MatCreateGraph_Simple_AIJ - create a simple scalar matrix (graph) from a potentially blocked matrix
7705 
7706  Input Parameters:
7707  + Amat - matrix
7708  . symmetrize - make the result symmetric
7709  . scale - scale with the diagonal
7710  - filter - if nonnegative, the graph is filtered with this tolerance (see the MatFilter() call below)
7711  Output Parameter:
7712  . a_Gmat - output scalar graph (entries >= 0)
7713 
7714 */
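/*
   Typical call (illustrative; A is a hypothetical blocked (MPI)AIJ matrix):

     Mat G;
     PetscCall(MatCreateGraph_Simple_AIJ(A, PETSC_TRUE, PETSC_TRUE, -1.0, &G)); // symmetrize and scale; a negative filter means no filtering
     // ... use G ...
     PetscCall(MatDestroy(&G));
*/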
7715 PETSC_INTERN PetscErrorCode MatCreateGraph_Simple_AIJ(Mat Amat, PetscBool symmetrize, PetscBool scale, PetscReal filter, Mat *a_Gmat)
7716 {
7717   PetscInt  Istart, Iend, Ii, jj, kk, ncols, nloc, NN, MM, bs;
7718   MPI_Comm  comm;
7719   Mat       Gmat;
7720   PetscBool ismpiaij, isseqaij;
7721   Mat       a, b, c;
7722   MatType   jtype;
7723 
7724   PetscFunctionBegin;
7725   PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
7726   PetscCall(MatGetOwnershipRange(Amat, &Istart, &Iend));
7727   PetscCall(MatGetSize(Amat, &MM, &NN));
7728   PetscCall(MatGetBlockSize(Amat, &bs));
7729   nloc = (Iend - Istart) / bs;
7730 
7731   PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATSEQAIJ, &isseqaij));
7732   PetscCall(PetscObjectBaseTypeCompare((PetscObject)Amat, MATMPIAIJ, &ismpiaij));
7733   PetscCheck(isseqaij || ismpiaij, comm, PETSC_ERR_USER, "Require (MPI)AIJ matrix type");
7734 
7735   /* TODO GPU: these calls are potentially expensive if matrices are large and we want to use the GPU */
7736   /* A solution would be to provide a new API, MatAIJGetCollapsedAIJ, for which each class can provide a fast
7737      implementation */
7738   if (bs > 1) {
7739     PetscCall(MatGetType(Amat, &jtype));
7740     PetscCall(MatCreate(comm, &Gmat));
7741     PetscCall(MatSetType(Gmat, jtype));
7742     PetscCall(MatSetSizes(Gmat, nloc, nloc, PETSC_DETERMINE, PETSC_DETERMINE));
7743     PetscCall(MatSetBlockSizes(Gmat, 1, 1));
7744     if (isseqaij || ((Mat_MPIAIJ *)Amat->data)->garray) {
7745       PetscInt  *d_nnz, *o_nnz;
7746       MatScalar *aa, val, *AA;
7747       PetscInt  *aj, *ai, *AJ, nc, nmax = 0;
7748       if (isseqaij) {
7749         a = Amat;
7750         b = NULL;
7751       } else {
7752         Mat_MPIAIJ *d = (Mat_MPIAIJ *)Amat->data;
7753         a             = d->A;
7754         b             = d->B;
7755       }
7756       PetscCall(PetscInfo(Amat, "New bs>1 Graph. nloc=%" PetscInt_FMT "\n", nloc));
7757       PetscCall(PetscMalloc2(nloc, &d_nnz, isseqaij ? 0 : nloc, &o_nnz));
7758       for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
7759         PetscInt       *nnz = (c == a) ? d_nnz : o_nnz;
7760         const PetscInt *cols1, *cols2;
7761         for (PetscInt brow = 0, nc1, nc2, ok = 1; brow < nloc * bs; brow += bs) { // block rows
7762           PetscCall(MatGetRow(c, brow, &nc2, &cols2, NULL));
7763           nnz[brow / bs] = nc2 / bs;
7764           if (nc2 % bs) ok = 0;
7765           if (nnz[brow / bs] > nmax) nmax = nnz[brow / bs];
7766           for (PetscInt ii = 1; ii < bs; ii++) { // check for non-dense blocks
7767             PetscCall(MatGetRow(c, brow + ii, &nc1, &cols1, NULL));
7768             if (nc1 != nc2) ok = 0;
7769             else {
7770               for (PetscInt jj = 0; jj < nc1 && ok == 1; jj++) {
7771                 if (cols1[jj] != cols2[jj]) ok = 0;
7772                 if (cols1[jj] % bs != jj % bs) ok = 0;
7773               }
7774             }
7775             PetscCall(MatRestoreRow(c, brow + ii, &nc1, &cols1, NULL));
7776           }
7777           PetscCall(MatRestoreRow(c, brow, &nc2, &cols2, NULL));
7778           if (!ok) {
7779             PetscCall(PetscFree2(d_nnz, o_nnz));
7780             PetscCall(PetscInfo(Amat, "Found sparse blocks - revert to slow method\n"));
7781             goto old_bs;
7782           }
7783         }
7784       }
7785       PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
7786       PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
7787       PetscCall(PetscFree2(d_nnz, o_nnz));
7788       PetscCall(PetscMalloc2(nmax, &AA, nmax, &AJ));
7789       // diag
7790       for (PetscInt brow = 0, n, grow; brow < nloc * bs; brow += bs) { // block rows
7791         Mat_SeqAIJ *aseq = (Mat_SeqAIJ *)a->data;
7792         ai               = aseq->i;
7793         n                = ai[brow + 1] - ai[brow];
7794         aj               = aseq->j + ai[brow];
7795         for (int k = 0; k < n; k += bs) {        // block columns
7796           AJ[k / bs] = aj[k] / bs + Istart / bs; // diag starts at (Istart,Istart)
7797           val        = 0;
7798           for (int ii = 0; ii < bs; ii++) { // rows in block
7799             aa = aseq->a + ai[brow + ii] + k;
7800             for (int jj = 0; jj < bs; jj++) {         // columns in block
7801               val += PetscAbs(PetscRealPart(aa[jj])); // a sort of norm
7802             }
7803           }
7804           PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%d) >= nmax (%d)", (int)(k / bs), (int)nmax);
7805           AA[k / bs] = val;
7806         }
7807         grow = Istart / bs + brow / bs;
7808         PetscCall(MatSetValues(Gmat, 1, &grow, n / bs, AJ, AA, INSERT_VALUES));
7809       }
7810       // off-diag
7811       if (ismpiaij) {
7812         Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)Amat->data;
7813         const PetscScalar *vals;
7814         const PetscInt    *cols, *garray = aij->garray;
7815         PetscCheck(garray, PETSC_COMM_SELF, PETSC_ERR_USER, "No garray ?");
7816         for (PetscInt brow = 0, grow; brow < nloc * bs; brow += bs) { // block rows
7817           PetscCall(MatGetRow(b, brow, &ncols, &cols, NULL));
7818           for (int k = 0, cidx = 0; k < ncols; k += bs, cidx++) {
7819             PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs >= nmax");
7820             AA[k / bs] = 0;
7821             AJ[cidx]   = garray[cols[k]] / bs;
7822           }
7823           nc = ncols / bs;
7824           PetscCall(MatRestoreRow(b, brow, &ncols, &cols, NULL));
7825           for (int ii = 0; ii < bs; ii++) { // rows in block
7826             PetscCall(MatGetRow(b, brow + ii, &ncols, &cols, &vals));
7827             for (int k = 0; k < ncols; k += bs) {
7828               for (int jj = 0; jj < bs; jj++) { // cols in block
7829                 PetscAssert(k / bs < nmax, comm, PETSC_ERR_USER, "k / bs (%d) >= nmax (%d)", (int)(k / bs), (int)nmax);
7830                 AA[k / bs] += PetscAbs(PetscRealPart(vals[k + jj]));
7831               }
7832             }
7833             PetscCall(MatRestoreRow(b, brow + ii, &ncols, &cols, &vals));
7834           }
7835           grow = Istart / bs + brow / bs;
7836           PetscCall(MatSetValues(Gmat, 1, &grow, nc, AJ, AA, INSERT_VALUES));
7837         }
7838       }
7839       PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
7840       PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
7841       PetscCall(PetscFree2(AA, AJ));
7842     } else {
7843       const PetscScalar *vals;
7844       const PetscInt    *idx;
7845       PetscInt          *d_nnz, *o_nnz, *w0, *w1, *w2;
7846     old_bs:
7847       /*
7848        Determine the preallocation needed for the scalar matrix derived from the vector matrix.
7849        */
7850       PetscCall(PetscInfo(Amat, "OLD bs>1 CreateGraph\n"));
7851       PetscCall(PetscMalloc2(nloc, &d_nnz, isseqaij ? 0 : nloc, &o_nnz));
7852       if (isseqaij) {
7853         PetscInt max_d_nnz;
7854         /*
7855          Determine exact preallocation count for (sequential) scalar matrix
7856          */
7857         PetscCall(MatSeqAIJGetMaxRowNonzeros(Amat, &max_d_nnz));
7858         max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
7859         PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
7860         for (Ii = 0, jj = 0; Ii < Iend; Ii += bs, jj++) PetscCall(MatCollapseRows(Amat, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
7861         PetscCall(PetscFree3(w0, w1, w2));
7862       } else if (ismpiaij) {
7863         Mat             Daij, Oaij;
7864         const PetscInt *garray;
7865         PetscInt        max_d_nnz;
7866         PetscCall(MatMPIAIJGetSeqAIJ(Amat, &Daij, &Oaij, &garray));
7867         /*
7868          Determine exact preallocation count for diagonal block portion of scalar matrix
7869          */
7870         PetscCall(MatSeqAIJGetMaxRowNonzeros(Daij, &max_d_nnz));
7871         max_d_nnz = PetscMin(nloc, bs * max_d_nnz);
7872         PetscCall(PetscMalloc3(max_d_nnz, &w0, max_d_nnz, &w1, max_d_nnz, &w2));
7873         for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) PetscCall(MatCollapseRows(Daij, Ii, bs, w0, w1, w2, &d_nnz[jj], NULL));
7874         PetscCall(PetscFree3(w0, w1, w2));
7875         /*
7876          Overestimate (usually grossly) the preallocation count for the off-diagonal portion of the scalar matrix
7877          */
7878         for (Ii = 0, jj = 0; Ii < Iend - Istart; Ii += bs, jj++) {
7879           o_nnz[jj] = 0;
7880           for (kk = 0; kk < bs; kk++) { /* rows that get collapsed to a single row */
7881             PetscCall(MatGetRow(Oaij, Ii + kk, &ncols, NULL, NULL));
7882             o_nnz[jj] += ncols;
7883             PetscCall(MatRestoreRow(Oaij, Ii + kk, &ncols, NULL, NULL));
7884           }
7885           if (o_nnz[jj] > (NN / bs - nloc)) o_nnz[jj] = NN / bs - nloc;
7886         }
7887       } else SETERRQ(comm, PETSC_ERR_USER, "Require AIJ matrix type");
7888       /* get scalar copy (norms) of matrix */
7889       PetscCall(MatSeqAIJSetPreallocation(Gmat, 0, d_nnz));
7890       PetscCall(MatMPIAIJSetPreallocation(Gmat, 0, d_nnz, 0, o_nnz));
7891       PetscCall(PetscFree2(d_nnz, o_nnz));
7892       for (Ii = Istart; Ii < Iend; Ii++) {
7893         PetscInt dest_row = Ii / bs;
7894         PetscCall(MatGetRow(Amat, Ii, &ncols, &idx, &vals));
7895         for (jj = 0; jj < ncols; jj++) {
7896           PetscInt    dest_col = idx[jj] / bs;
7897           PetscScalar sv       = PetscAbs(PetscRealPart(vals[jj]));
7898           PetscCall(MatSetValues(Gmat, 1, &dest_row, 1, &dest_col, &sv, ADD_VALUES));
7899         }
7900         PetscCall(MatRestoreRow(Amat, Ii, &ncols, &idx, &vals));
7901       }
7902       PetscCall(MatAssemblyBegin(Gmat, MAT_FINAL_ASSEMBLY));
7903       PetscCall(MatAssemblyEnd(Gmat, MAT_FINAL_ASSEMBLY));
7904     }
7905   } else {
7906     if (symmetrize || filter >= 0 || scale) PetscCall(MatDuplicate(Amat, MAT_COPY_VALUES, &Gmat));
7907     else {
7908       Gmat = Amat;
7909       PetscCall(PetscObjectReference((PetscObject)Gmat));
7910     }
7911     if (isseqaij) {
7912       a = Gmat;
7913       b = NULL;
7914     } else {
7915       Mat_MPIAIJ *d = (Mat_MPIAIJ *)Gmat->data;
7916       a             = d->A;
7917       b             = d->B;
7918     }
7919     if (filter >= 0 || scale) {
7920       /* take absolute value of each entry */
7921       for (c = a, kk = 0; c && kk < 2; c = b, kk++) {
7922         MatInfo      info;
7923         PetscScalar *avals;
7924         PetscCall(MatGetInfo(c, MAT_LOCAL, &info));
7925         PetscCall(MatSeqAIJGetArray(c, &avals));
7926         for (int jj = 0; jj < info.nz_used; jj++) avals[jj] = PetscAbsScalar(avals[jj]);
7927         PetscCall(MatSeqAIJRestoreArray(c, &avals));
7928       }
7929     }
7930   }
7931   if (symmetrize) {
7932     PetscBool isset, issym;
7933     PetscCall(MatIsSymmetricKnown(Amat, &isset, &issym));
7934     if (!isset || !issym) {
7935       Mat matTrans;
7936       PetscCall(MatTranspose(Gmat, MAT_INITIAL_MATRIX, &matTrans));
7937       PetscCall(MatAXPY(Gmat, 1.0, matTrans, Gmat->structurally_symmetric == PETSC_BOOL3_TRUE ? SAME_NONZERO_PATTERN : DIFFERENT_NONZERO_PATTERN));
7938       PetscCall(MatDestroy(&matTrans));
7939     }
7940     PetscCall(MatSetOption(Gmat, MAT_SYMMETRIC, PETSC_TRUE));
7941   } else if (Amat != Gmat) PetscCall(MatPropagateSymmetryOptions(Amat, Gmat));
7942   if (scale) {
7943     /* scale Gmat so that all diagonal values are +1 or -1 */
7944     Vec diag;
7945     PetscCall(MatCreateVecs(Gmat, &diag, NULL));
7946     PetscCall(MatGetDiagonal(Gmat, diag));
7947     PetscCall(VecReciprocal(diag));
7948     PetscCall(VecSqrtAbs(diag));
7949     PetscCall(MatDiagonalScale(Gmat, diag, diag));
7950     PetscCall(VecDestroy(&diag));
7951   }
7952   PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_graph_view"));
7953 
7954   if (filter >= 0) {
7955     PetscCall(MatFilter(Gmat, filter, PETSC_TRUE, PETSC_TRUE));
7956     PetscCall(MatViewFromOptions(Gmat, NULL, "-mat_filter_graph_view"));
7957   }
7958   *a_Gmat = Gmat;
7959   PetscFunctionReturn(PETSC_SUCCESS);
7960 }
7961 
7962 /*
7963     Special version for direct calls from Fortran
7964 */
7965 #include <petsc/private/fortranimpl.h>
7966 
7967 /* Change these macros so they can be used in a void function */
7968 /* Identical to PetscCallVoid, except it assigns to *_ierr */
7969 #undef PetscCall
7970 #define PetscCall(...) \
7971   do { \
7972     PetscErrorCode ierr_msv_mpiaij = __VA_ARGS__; \
7973     if (PetscUnlikely(ierr_msv_mpiaij)) { \
7974       *_ierr = PetscError(PETSC_COMM_SELF, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr_msv_mpiaij, PETSC_ERROR_REPEAT, " "); \
7975       return; \
7976     } \
7977   } while (0)
7978 
7979 #undef SETERRQ
7980 #define SETERRQ(comm, ierr, ...) \
7981   do { \
7982     *_ierr = PetscError(comm, __LINE__, PETSC_FUNCTION_NAME, __FILE__, ierr, PETSC_ERROR_INITIAL, __VA_ARGS__); \
7983     return; \
7984   } while (0)
7985 
7986 #if defined(PETSC_HAVE_FORTRAN_CAPS)
7987   #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
7988 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
7989   #define matsetvaluesmpiaij_ matsetvaluesmpiaij
7990 #else
7991 #endif
7992 PETSC_EXTERN void matsetvaluesmpiaij_(Mat *mmat, PetscInt *mm, const PetscInt im[], PetscInt *mn, const PetscInt in[], const PetscScalar v[], InsertMode *maddv, PetscErrorCode *_ierr)
7993 {
7994   Mat         mat = *mmat;
7995   PetscInt    m = *mm, n = *mn;
7996   InsertMode  addv = *maddv;
7997   Mat_MPIAIJ *aij  = (Mat_MPIAIJ *)mat->data;
7998   PetscScalar value;
7999 
8000   MatCheckPreallocated(mat, 1);
8001   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
8002   else PetscCheck(mat->insertmode == addv, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Cannot mix add values and insert values");
8003   {
8004     PetscInt  i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
8005     PetscInt  cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
8006     PetscBool roworiented = aij->roworiented;
8007 
8008     /* Some Variables required in the macro */
8009     Mat         A     = aij->A;
8010     Mat_SeqAIJ *a     = (Mat_SeqAIJ *)A->data;
8011     PetscInt   *aimax = a->imax, *ai = a->i, *ailen = a->ilen, *aj = a->j;
8012     MatScalar  *aa;
8013     PetscBool   ignorezeroentries = (((a->ignorezeroentries) && (addv == ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
8014     Mat         B                 = aij->B;
8015     Mat_SeqAIJ *b                 = (Mat_SeqAIJ *)B->data;
8016     PetscInt   *bimax = b->imax, *bi = b->i, *bilen = b->ilen, *bj = b->j, bm = aij->B->rmap->n, am = aij->A->rmap->n;
8017     MatScalar  *ba;
8018     /* This variable below is only for the PETSC_HAVE_VIENNACL or PETSC_HAVE_CUDA cases, but we define it in all cases because we
8019      * cannot use "#if defined" inside a macro. */
8020     PETSC_UNUSED PetscBool inserted = PETSC_FALSE;
8021 
8022     PetscInt  *rp1, *rp2, ii, nrow1, nrow2, _i, rmax1, rmax2, N, low1, high1, low2, high2, t, lastcol1, lastcol2;
8023     PetscInt   nonew = a->nonew;
8024     MatScalar *ap1, *ap2;
8025 
8026     PetscFunctionBegin;
8027     PetscCall(MatSeqAIJGetArray(A, &aa));
8028     PetscCall(MatSeqAIJGetArray(B, &ba));
8029     for (i = 0; i < m; i++) {
8030       if (im[i] < 0) continue;
8031       PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
8032       if (im[i] >= rstart && im[i] < rend) {
8033         row      = im[i] - rstart;
8034         lastcol1 = -1;
8035         rp1      = aj + ai[row];
8036         ap1      = aa + ai[row];
8037         rmax1    = aimax[row];
8038         nrow1    = ailen[row];
8039         low1     = 0;
8040         high1    = nrow1;
8041         lastcol2 = -1;
8042         rp2      = bj + bi[row];
8043         ap2      = ba + bi[row];
8044         rmax2    = bimax[row];
8045         nrow2    = bilen[row];
8046         low2     = 0;
8047         high2    = nrow2;
8048 
8049         for (j = 0; j < n; j++) {
8050           if (roworiented) value = v[i * n + j];
8051           else value = v[i + j * m];
8052           if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && im[i] != in[j]) continue;
8053           if (in[j] >= cstart && in[j] < cend) {
8054             col = in[j] - cstart;
8055             MatSetValues_SeqAIJ_A_Private(row, col, value, addv, im[i], in[j]);
8056           } else if (in[j] < 0) continue;
8057           else if (PetscUnlikelyDebug(in[j] >= mat->cmap->N)) {
8058             SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
8059           } else {
8060             if (mat->was_assembled) {
8061               if (!aij->colmap) PetscCall(MatCreateColmap_MPIAIJ_Private(mat));
8062 #if defined(PETSC_USE_CTABLE)
8063               PetscCall(PetscHMapIGetWithDefault(aij->colmap, in[j] + 1, 0, &col));
8064               col--;
8065 #else
8066               col = aij->colmap[in[j]] - 1;
8067 #endif
8068               if (col < 0 && !((Mat_SeqAIJ *)(aij->A->data))->nonew) {
8069                 PetscCall(MatDisAssemble_MPIAIJ(mat));
8070                 col = in[j];
8071                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
8072                 B        = aij->B;
8073                 b        = (Mat_SeqAIJ *)B->data;
8074                 bimax    = b->imax;
8075                 bi       = b->i;
8076                 bilen    = b->ilen;
8077                 bj       = b->j;
8078                 rp2      = bj + bi[row];
8079                 ap2      = ba + bi[row];
8080                 rmax2    = bimax[row];
8081                 nrow2    = bilen[row];
8082                 low2     = 0;
8083                 high2    = nrow2;
8084                 bm       = aij->B->rmap->n;
8085                 ba       = b->a;
8086                 inserted = PETSC_FALSE;
8087               }
8088             } else col = in[j];
8089             MatSetValues_SeqAIJ_B_Private(row, col, value, addv, im[i], in[j]);
8090           }
8091         }
8092       } else if (!aij->donotstash) {
8093         if (roworiented) {
8094           PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
8095         } else {
8096           PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
8097         }
8098       }
8099     }
8100     PetscCall(MatSeqAIJRestoreArray(A, &aa));
8101     PetscCall(MatSeqAIJRestoreArray(B, &ba));
8102   }
8103   PetscFunctionReturnVoid();
8104 }
8105 
8106 /* Undefining these here since they were redefined from their original definition above! No
8107  * other PETSc functions should be defined past this point, as it is impossible to recover the
8108  * original definitions */
8109 #undef PetscCall
8110 #undef SETERRQ
8111