1 /*
2  GAMG geometric-algebraic multigrid PC - Mark Adams 2011
3  */
4 #include <../src/ksp/pc/impls/gamg/gamg.h>            /*I "petscpc.h" I*/
5 #include <../src/ksp/ksp/impls/cheby/chebyshevimpl.h> /*I "petscksp.h" I*/
6 
7 #if defined(PETSC_HAVE_CUDA)
8   #include <cuda_runtime.h>
9 #endif
10 
11 #if defined(PETSC_HAVE_HIP)
12   #include <hip/hip_runtime.h>
13 #endif
14 
15 PetscLogEvent petsc_gamg_setup_events[GAMG_NUM_SET];
16 PetscLogEvent petsc_gamg_setup_matmat_events[PETSC_MG_MAXLEVELS][3];
17 
18 // #define GAMG_STAGES
19 #if defined(GAMG_STAGES)
20 static PetscLogStage gamg_stages[PETSC_MG_MAXLEVELS];
21 #endif
22 
23 static PetscFunctionList GAMGList = NULL;
24 static PetscBool         PCGAMGPackageInitialized;
25 
26 static PetscErrorCode PCReset_GAMG(PC pc)
27 {
28   PC_MG   *mg      = (PC_MG *)pc->data;
29   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
30 
31   PetscFunctionBegin;
32   PetscCall(PetscFree(pc_gamg->data));
33   pc_gamg->data_sz = 0;
34   PetscCall(PetscFree(pc_gamg->orig_data));
35   for (PetscInt level = 0; level < PETSC_MG_MAXLEVELS; level++) {
36     mg->min_eigen_DinvA[level] = 0;
37     mg->max_eigen_DinvA[level] = 0;
38   }
39   pc_gamg->emin = 0;
40   pc_gamg->emax = 0;
41   PetscCall(PCReset_MG(pc));
42   PetscFunctionReturn(PETSC_SUCCESS);
43 }
44 
45 /*
46    PCGAMGCreateLevel_GAMG: create the coarse operator with RAP, then repartition and/or reduce the
47      number of active processes.
48 
49    Input Parameters:
50    . pc - the preconditioner context (side effect: coarse data in 'pc_gamg->data' and
51           'pc_gamg->data_sz' are changed via repartitioning/reduction)
52    . Amat_fine - matrix on this fine (k) level
53    . cr_bs - coarse block size
54    Input/Output Parameters:
55    . a_P_inout - prolongation operator to the next level (k-->k-1)
56    . a_nactive_proc - number of active processes
57    Output Parameter:
58    . a_Amat_crs - coarse matrix that is created (k-1)
59 */
60 static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc, Mat Amat_fine, PetscInt cr_bs, Mat *a_P_inout, Mat *a_Amat_crs, PetscMPIInt *a_nactive_proc, IS *Pcolumnperm, PetscBool is_last)
61 {
62   PC_MG      *mg      = (PC_MG *)pc->data;
63   PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
64   Mat         Cmat, Pold = *a_P_inout;
65   MPI_Comm    comm;
66   PetscMPIInt rank, size, new_size, nactive = *a_nactive_proc;
67   PetscInt    ncrs_eq, ncrs, f_bs;
68 
69   PetscFunctionBegin;
70   PetscCall(PetscObjectGetComm((PetscObject)Amat_fine, &comm));
71   PetscCallMPI(MPI_Comm_rank(comm, &rank));
72   PetscCallMPI(MPI_Comm_size(comm, &size));
73   PetscCall(MatGetBlockSize(Amat_fine, &f_bs));
74   PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));
75   PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
76   PetscCall(MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat));
77   PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
78   PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));
79 
80   if (Pcolumnperm) *Pcolumnperm = NULL;
81 
82   /* set 'ncrs' (nodes), 'ncrs_eq' (equations)*/
83   PetscCall(MatGetLocalSize(Cmat, &ncrs_eq, NULL));
84   if (pc_gamg->data_cell_rows > 0) {
85     ncrs = pc_gamg->data_sz / pc_gamg->data_cell_cols / pc_gamg->data_cell_rows;
86   } else {
87     PetscInt bs;
88     PetscCall(MatGetBlockSize(Cmat, &bs));
89     ncrs = ncrs_eq / bs;
90   }
91   /* get number of PEs to make active 'new_size', reduce, can be any integer 1-P */
92   if (pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0 && PetscDefined(HAVE_CUDA) && pc_gamg->current_level == 0) { /* a factor of 0 enables reducing to one process per device; should also be done for HIP, etc. */
93 #if defined(PETSC_HAVE_CUDA)
94     PetscShmComm pshmcomm;
95     PetscMPIInt  locrank;
96     MPI_Comm     loccomm;
97     PetscInt     s_nnodes, r_nnodes, new_new_size;
98     cudaError_t  cerr;
99     int          devCount;
100     PetscCall(PetscShmCommGet(comm, &pshmcomm));
101     PetscCall(PetscShmCommGetMpiShmComm(pshmcomm, &loccomm));
102     PetscCallMPI(MPI_Comm_rank(loccomm, &locrank));
103     s_nnodes = !locrank;
104     PetscCall(MPIU_Allreduce(&s_nnodes, &r_nnodes, 1, MPIU_INT, MPI_SUM, comm));
105     PetscCheck((size % r_nnodes) == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "number of processes np=%d not divisible by number of nodes nnodes=%" PetscInt_FMT, size, r_nnodes);
106     devCount = 0;
107     cerr     = cudaGetDeviceCount(&devCount);
108     cudaGetLastError();                         /* Reset the last error */
109     if (cerr == cudaSuccess && devCount >= 1) { /* There are devices, else go to heuristic */
110       new_new_size = r_nnodes * devCount;
111       new_size     = new_new_size;
112       PetscCall(PetscInfo(pc, "%s: Fine grid with Cuda. %" PetscInt_FMT " nodes. Change new active set size %d --> %d (devCount=%d #nodes=%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, r_nnodes, nactive, new_size, devCount, r_nnodes));
113     } else {
114       PetscCall(PetscInfo(pc, "%s: With Cuda but no device. Use heuristics.\n", ((PetscObject)pc)->prefix));
115       goto HEURISTIC;
116     }
117 #else
118     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "should not be here");
119 #endif
120   } else if (pc_gamg->level_reduction_factors[pc_gamg->current_level] > 0) {
121     if (nactive < pc_gamg->level_reduction_factors[pc_gamg->current_level]) {
122       new_size = 1;
123       PetscCall(PetscInfo(pc, "%s: fewer active processes (%d) than the reduction factor: reduce to one process\n", ((PetscObject)pc)->prefix, nactive));
124     } else {
125       PetscCheck(nactive % pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "number of active processes %d not divisible by reduction factor %" PetscInt_FMT, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]);
126       new_size = nactive / pc_gamg->level_reduction_factors[pc_gamg->current_level];
127       PetscCall(PetscInfo(pc, "%s: Manually setting reduction to %d active processes (%d/%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, new_size, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]));
128     }
129   } else if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) {
130     new_size = 1;
131     PetscCall(PetscInfo(pc, "%s: Force coarsest grid reduction to %d active processes\n", ((PetscObject)pc)->prefix, new_size));
132   } else {
133     PetscInt ncrs_eq_glob;
134 #if defined(PETSC_HAVE_CUDA)
135   HEURISTIC:
136 #endif
137     PetscCall(MatGetSize(Cmat, &ncrs_eq_glob, NULL));
138     new_size = (PetscMPIInt)((float)ncrs_eq_glob / (float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
139     if (!new_size) new_size = 1;                                                       /* not likely, possible? */
140     else if (new_size >= nactive) new_size = nactive;                                  /* no change, rare */
141     PetscCall(PetscInfo(pc, "%s: Coarse grid reduction from %d to %d active processes\n", ((PetscObject)pc)->prefix, nactive, new_size));
142   }
143   if (new_size == nactive) {
144     *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
145     if (new_size < size) {
146       /* odd case where multiple coarse grids are on one processor or no coarsening ... */
147       PetscCall(PetscInfo(pc, "%s: reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n", ((PetscObject)pc)->prefix, nactive));
148       if (pc_gamg->cpu_pin_coarse_grids) {
149         PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
150         PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
151       }
152     }
153     /* we know that the grid structure can be reused in MatPtAP */
154   } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
155     PetscInt *counts, *newproc_idx, ii, jj, kk, strideNew, *tidx, ncrs_new, ncrs_eq_new, nloc_old, expand_factor = 1, rfactor = 1;
156     IS        is_eq_newproc, is_eq_num, is_eq_num_prim, new_eq_indices;
157     PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
158     nloc_old = ncrs_eq / cr_bs;
159     PetscCheck(ncrs_eq % cr_bs == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ncrs_eq %" PetscInt_FMT " not divisible by cr_bs %" PetscInt_FMT, ncrs_eq, cr_bs);
160     /* get new_size and rfactor */
161     if (pc_gamg->layout_type == PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
162       /* find factor */
163       if (new_size == 1) rfactor = size; /* don't modify */
164       else {
165         PetscReal best_fact = 0.;
166         jj                  = -1;
167         for (kk = 1; kk <= size; kk++) {
168           if (!(size % kk)) { /* a candidate */
169             PetscReal nactpe = (PetscReal)size / (PetscReal)kk, fact = nactpe / (PetscReal)new_size;
170             if (fact > 1.0) fact = 1. / fact; /* keep fact < 1 */
171             if (fact > best_fact) {
172               best_fact = fact;
173               jj        = kk;
174             }
175           }
176         }
177         if (jj != -1) rfactor = jj;
178         else rfactor = 1; /* a prime */
179         if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1;
180         else expand_factor = rfactor;
181       }
182       new_size = size / rfactor; /* make the new size a divisor of the total number of processes */
183       if (new_size == nactive) { /* no repartitioning or reduction, bail out because nested here (rare) */
184         *a_Amat_crs = Cmat;
185         PetscCall(PetscInfo(pc, "%s: Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, new_size, ncrs_eq));
186         PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
187         PetscFunctionReturn(PETSC_SUCCESS);
188       }
189     }
190     /* make 'is_eq_newproc' */
191     PetscCall(PetscMalloc1(size, &counts));
192     if (pc_gamg->repart) { /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
193       Mat adj;
194       PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
195       PetscCall(PetscInfo(pc, "%s: Repartition: size (active): %d --> %d, %" PetscInt_FMT " local equations, using %s process layout\n", ((PetscObject)pc)->prefix, *a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread"));
196       /* get 'adj' */
197       if (cr_bs == 1) {
198         PetscCall(MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
199       } else {
200         /* make a scalar matrix to partition (no Stokes here) */
201         Mat                tMat;
202         PetscInt           Istart_crs, Iend_crs, ncols, jj, Ii;
203         const PetscScalar *vals;
204         const PetscInt    *idx;
205         PetscInt          *d_nnz, *o_nnz, M, N;
206         static PetscInt    llev = 0; /* ugly but just used for debugging */
207         MatType            mtype;
208 
209         PetscCall(PetscMalloc2(ncrs, &d_nnz, ncrs, &o_nnz));
210         PetscCall(MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs));
211         PetscCall(MatGetSize(Cmat, &M, &N));
212         for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
213           PetscCall(MatGetRow(Cmat, Ii, &ncols, NULL, NULL));
214           d_nnz[jj] = ncols / cr_bs;
215           o_nnz[jj] = ncols / cr_bs;
216           PetscCall(MatRestoreRow(Cmat, Ii, &ncols, NULL, NULL));
217           if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
218           if (o_nnz[jj] > (M / cr_bs - ncrs)) o_nnz[jj] = M / cr_bs - ncrs;
219         }
220 
221         PetscCall(MatGetType(Amat_fine, &mtype));
222         PetscCall(MatCreate(comm, &tMat));
223         PetscCall(MatSetSizes(tMat, ncrs, ncrs, PETSC_DETERMINE, PETSC_DETERMINE));
224         PetscCall(MatSetType(tMat, mtype));
225         PetscCall(MatSeqAIJSetPreallocation(tMat, 0, d_nnz));
226         PetscCall(MatMPIAIJSetPreallocation(tMat, 0, d_nnz, 0, o_nnz));
227         PetscCall(PetscFree2(d_nnz, o_nnz));
228 
229         for (ii = Istart_crs; ii < Iend_crs; ii++) {
230           PetscInt dest_row = ii / cr_bs;
231           PetscCall(MatGetRow(Cmat, ii, &ncols, &idx, &vals));
232           for (jj = 0; jj < ncols; jj++) {
233             PetscInt    dest_col = idx[jj] / cr_bs;
234             PetscScalar v        = 1.0;
235             PetscCall(MatSetValues(tMat, 1, &dest_row, 1, &dest_col, &v, ADD_VALUES));
236           }
237           PetscCall(MatRestoreRow(Cmat, ii, &ncols, &idx, &vals));
238         }
239         PetscCall(MatAssemblyBegin(tMat, MAT_FINAL_ASSEMBLY));
240         PetscCall(MatAssemblyEnd(tMat, MAT_FINAL_ASSEMBLY));
241 
242         if (llev++ == -1) {
243           PetscViewer viewer;
244           char        fname[32];
245           PetscCall(PetscSNPrintf(fname, sizeof(fname), "part_mat_%" PetscInt_FMT ".mat", llev));
246           PetscCall(PetscViewerBinaryOpen(comm, fname, FILE_MODE_WRITE, &viewer));
247           PetscCall(MatView(tMat, viewer));
248           PetscCall(PetscViewerDestroy(&viewer));
249         }
250         PetscCall(MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
251         PetscCall(MatDestroy(&tMat));
252       } /* create 'adj' */
253 
254       { /* partition: get newproc_idx */
255         char            prefix[256];
256         const char     *pcpre;
257         const PetscInt *is_idx;
258         MatPartitioning mpart;
259         IS              proc_is;
260 
261         PetscCall(MatPartitioningCreate(comm, &mpart));
262         PetscCall(MatPartitioningSetAdjacency(mpart, adj));
263         PetscCall(PCGetOptionsPrefix(pc, &pcpre));
264         PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
265         PetscCall(PetscObjectSetOptionsPrefix((PetscObject)mpart, prefix));
266         PetscCall(MatPartitioningSetFromOptions(mpart));
267         PetscCall(MatPartitioningSetNParts(mpart, new_size));
268         PetscCall(MatPartitioningApply(mpart, &proc_is));
269         PetscCall(MatPartitioningDestroy(&mpart));
270 
271         /* collect IS info */
272         PetscCall(PetscMalloc1(ncrs_eq, &newproc_idx));
273         PetscCall(ISGetIndices(proc_is, &is_idx));
274         for (kk = jj = 0; kk < nloc_old; kk++) {
275           for (ii = 0; ii < cr_bs; ii++, jj++) { newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */ }
276         }
277         PetscCall(ISRestoreIndices(proc_is, &is_idx));
278         PetscCall(ISDestroy(&proc_is));
279       }
280       PetscCall(MatDestroy(&adj));
281 
282       PetscCall(ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc));
283       PetscCall(PetscFree(newproc_idx));
284       PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
285     } else { /* simple aggregation of parts -- 'is_eq_newproc' */
286       PetscInt targetPE;
287       PetscCheck(new_size != nactive, PETSC_COMM_SELF, PETSC_ERR_PLIB, "new_size==nactive. Should not happen");
288       PetscCall(PetscInfo(pc, "%s: Number of equations (loc) %" PetscInt_FMT " with simple aggregation\n", ((PetscObject)pc)->prefix, ncrs_eq));
289       targetPE = (rank / rfactor) * expand_factor;
290       PetscCall(ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc));
291     } /* end simple 'is_eq_newproc' */
292 
293     /*
294       Create an index set from the is_eq_newproc index set to indicate the mapping TO
295     */
296     PetscCall(ISPartitioningToNumbering(is_eq_newproc, &is_eq_num));
297     is_eq_num_prim = is_eq_num;
298     /*
299       Determine how many equations/vertices are assigned to each processor
300     */
301     PetscCall(ISPartitioningCount(is_eq_newproc, size, counts));
302     ncrs_eq_new = counts[rank];
303     PetscCall(ISDestroy(&is_eq_newproc));
304     ncrs_new = ncrs_eq_new / cr_bs;
305 
306     PetscCall(PetscFree(counts));
307     /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
308     {
309       Vec             src_crd, dest_crd;
310       const PetscInt *idx, ndata_rows = pc_gamg->data_cell_rows, ndata_cols = pc_gamg->data_cell_cols, node_data_sz = ndata_rows * ndata_cols;
311       VecScatter      vecscat;
312       PetscScalar    *array;
313       IS              isscat;
314       /* move data (for primal equations only) */
315       /* Create a vector to contain the newly ordered element information */
316       PetscCall(VecCreate(comm, &dest_crd));
317       PetscCall(VecSetSizes(dest_crd, node_data_sz * ncrs_new, PETSC_DECIDE));
318       PetscCall(VecSetType(dest_crd, VECSTANDARD)); /* this is needed! */
319       /*
320         There are 'ndata_rows*ndata_cols' data items per node (one can think of the vectors as having
321         a block size of ...).  Note, ISs are expanded into equation space by 'cr_bs'.
322       */
323       PetscCall(PetscMalloc1(ncrs * node_data_sz, &tidx));
324       PetscCall(ISGetIndices(is_eq_num_prim, &idx));
325       for (ii = 0, jj = 0; ii < ncrs; ii++) {
326         PetscInt id = idx[ii * cr_bs] / cr_bs; /* get node back */
327         for (kk = 0; kk < node_data_sz; kk++, jj++) tidx[jj] = id * node_data_sz + kk;
328       }
329       PetscCall(ISRestoreIndices(is_eq_num_prim, &idx));
330       PetscCall(ISCreateGeneral(comm, node_data_sz * ncrs, tidx, PETSC_COPY_VALUES, &isscat));
331       PetscCall(PetscFree(tidx));
332       /*
333         Create a vector to contain the original vertex information for each element
334       */
335       PetscCall(VecCreateSeq(PETSC_COMM_SELF, node_data_sz * ncrs, &src_crd));
336       for (jj = 0; jj < ndata_cols; jj++) {
337         const PetscInt stride0 = ncrs * pc_gamg->data_cell_rows;
338         for (ii = 0; ii < ncrs; ii++) {
339           for (kk = 0; kk < ndata_rows; kk++) {
340             PetscInt    ix = ii * ndata_rows + kk + jj * stride0, jx = ii * node_data_sz + kk * ndata_cols + jj;
341             PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
342             PetscCall(VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES));
343           }
344         }
345       }
346       PetscCall(VecAssemblyBegin(src_crd));
347       PetscCall(VecAssemblyEnd(src_crd));
348       /*
349         Scatter the element vertex information (still in the original vertex ordering)
350         to the correct processor
351       */
352       PetscCall(VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat));
353       PetscCall(ISDestroy(&isscat));
354       PetscCall(VecScatterBegin(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
355       PetscCall(VecScatterEnd(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
356       PetscCall(VecScatterDestroy(&vecscat));
357       PetscCall(VecDestroy(&src_crd));
358       /*
359         Put the element vertex data into a new allocation of the gdata->ele
360       */
361       PetscCall(PetscFree(pc_gamg->data));
362       PetscCall(PetscMalloc1(node_data_sz * ncrs_new, &pc_gamg->data));
363 
364       pc_gamg->data_sz = node_data_sz * ncrs_new;
365       strideNew        = ncrs_new * ndata_rows;
366 
367       PetscCall(VecGetArray(dest_crd, &array));
368       for (jj = 0; jj < ndata_cols; jj++) {
369         for (ii = 0; ii < ncrs_new; ii++) {
370           for (kk = 0; kk < ndata_rows; kk++) {
371             PetscInt ix = ii * ndata_rows + kk + jj * strideNew, jx = ii * node_data_sz + kk * ndata_cols + jj;
372             pc_gamg->data[ix] = PetscRealPart(array[jx]);
373           }
374         }
375       }
376       PetscCall(VecRestoreArray(dest_crd, &array));
377       PetscCall(VecDestroy(&dest_crd));
378     }
379     /* move A and P (columns) with new layout */
380     /*
381       Invert for MatCreateSubMatrix
382     */
383     PetscCall(ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices));
384     PetscCall(ISSort(new_eq_indices)); /* is this needed? */
385     PetscCall(ISSetBlockSize(new_eq_indices, cr_bs));
386     if (is_eq_num != is_eq_num_prim) { PetscCall(ISDestroy(&is_eq_num_prim)); /* could be same as 'is_eq_num' */ }
387     if (Pcolumnperm) {
388       PetscCall(PetscObjectReference((PetscObject)new_eq_indices));
389       *Pcolumnperm = new_eq_indices;
390     }
391     PetscCall(ISDestroy(&is_eq_num));
392 
393     /* 'a_Amat_crs' output */
394     {
395       Mat       mat;
396       PetscBool isset, isspd, isher;
397 #if !defined(PETSC_USE_COMPLEX)
398       PetscBool issym;
399 #endif
400 
401       PetscCall(MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat));
402       PetscCall(MatIsSPDKnown(Cmat, &isset, &isspd)); // like MatPropagateSymmetryOptions, but should set MAT_STRUCTURALLY_SYMMETRIC ?
403       if (isset) PetscCall(MatSetOption(mat, MAT_SPD, isspd));
404       else {
405         PetscCall(MatIsHermitianKnown(Cmat, &isset, &isher));
406         if (isset) PetscCall(MatSetOption(mat, MAT_HERMITIAN, isher));
407         else {
408 #if !defined(PETSC_USE_COMPLEX)
409           PetscCall(MatIsSymmetricKnown(Cmat, &isset, &issym));
410           if (isset) PetscCall(MatSetOption(mat, MAT_SYMMETRIC, issym));
411 #endif
412         }
413       }
414       *a_Amat_crs = mat;
415     }
416     PetscCall(MatDestroy(&Cmat));
417 
418     /* prolongator */
419     {
420       IS       findices;
421       PetscInt Istart, Iend;
422       Mat      Pnew;
423 
424       PetscCall(MatGetOwnershipRange(Pold, &Istart, &Iend));
425       PetscCall(ISCreateStride(comm, Iend - Istart, Istart, 1, &findices));
426       PetscCall(ISSetBlockSize(findices, f_bs));
427       PetscCall(MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew));
428       PetscCall(ISDestroy(&findices));
429       PetscCall(MatSetOption(Pnew, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
430 
431       PetscCall(MatDestroy(a_P_inout));
432 
433       /* output - repartitioned */
434       *a_P_inout = Pnew;
435     }
436     PetscCall(ISDestroy(&new_eq_indices));
437 
438     *a_nactive_proc = new_size; /* output */
439 
440     /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
441     if (pc_gamg->cpu_pin_coarse_grids) {
442 #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
443       static PetscInt llev = 2;
444       PetscCall(PetscInfo(pc, "%s: Pinning level %" PetscInt_FMT " to the CPU\n", ((PetscObject)pc)->prefix, llev++));
445 #endif
446       PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
447       PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
448       if (1) { /* HACK: move this to MatBindCPU_MPIAIJXXX; lvec is created, need to pin it, this is done in MatSetUpMultiply_MPIAIJ. Hack */
449         Mat         A = *a_Amat_crs, P = *a_P_inout;
450         PetscMPIInt size;
451         PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
452         if (size > 1) {
453           Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data;
454           PetscCall(VecBindToCPU(a->lvec, PETSC_TRUE));
455           PetscCall(VecBindToCPU(p->lvec, PETSC_TRUE));
456         }
457       }
458     }
459     PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
460   } // processor reduce
461   PetscFunctionReturn(PETSC_SUCCESS);
462 }
463 
464 // used in GEO
465 PetscErrorCode PCGAMGSquareGraph_GAMG(PC a_pc, Mat Gmat1, Mat *Gmat2)
466 {
467   const char *prefix;
468   char        addp[32];
469   PC_MG      *mg      = (PC_MG *)a_pc->data;
470   PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
471 
472   PetscFunctionBegin;
473   PetscCall(PCGetOptionsPrefix(a_pc, &prefix));
474   PetscCall(PetscInfo(a_pc, "%s: Square Graph on level %" PetscInt_FMT "\n", ((PetscObject)a_pc)->prefix, pc_gamg->current_level + 1));
475   PetscCall(MatProductCreate(Gmat1, Gmat1, NULL, Gmat2));
476   PetscCall(MatSetOptionsPrefix(*Gmat2, prefix));
477   PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_square_%" PetscInt_FMT "_", pc_gamg->current_level));
478   PetscCall(MatAppendOptionsPrefix(*Gmat2, addp));
479   if ((*Gmat2)->structurally_symmetric == PETSC_BOOL3_TRUE) {
480     PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AB));
481   } else {
482     PetscCall(MatSetOption(Gmat1, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
483     PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AtB));
484   }
485   PetscCall(MatProductSetFromOptions(*Gmat2));
486   PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
487   PetscCall(MatProductSymbolic(*Gmat2));
488   PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
489   PetscCall(MatProductClear(*Gmat2));
490   /* we only need the sparsity, cheat and tell PETSc the matrix has been assembled */
491   (*Gmat2)->assembled = PETSC_TRUE;
492   PetscFunctionReturn(PETSC_SUCCESS);
493 }
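/*
  Example usage (a minimal sketch): the setup routine below is normally reached through the standard
  KSP/PC interface rather than by calling it directly. The matrix A and vectors b, x are assumed to
  already exist and be assembled.

    KSP ksp;
    PC  pc;

    PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
    PetscCall(KSPSetOperators(ksp, A, A));
    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCSetType(pc, PCGAMG));   // select this preconditioner
    PetscCall(KSPSetFromOptions(ksp));  // picks up the -pc_gamg_* options handled in this file
    PetscCall(KSPSolve(ksp, b, x));     // PCSetUp_GAMG() runs on first use
    PetscCall(KSPDestroy(&ksp));
*/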
494 
495 /*
496    PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
497                     by setting data structures and options.
498 
499    Input Parameter:
500 .  pc - the preconditioner context
501 
502 */
503 static PetscErrorCode PCSetUp_GAMG(PC pc)
504 {
505   PC_MG      *mg      = (PC_MG *)pc->data;
506   PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
507   Mat         Pmat    = pc->pmat;
508   PetscInt    fine_level, level, level1, bs, M, N, qq, lidx, nASMBlocksArr[PETSC_MG_MAXLEVELS], cr_bs;
509   MPI_Comm    comm;
510   PetscMPIInt rank, size, nactivepe;
511   Mat         Aarr[PETSC_MG_MAXLEVELS], Parr[PETSC_MG_MAXLEVELS];
512   IS         *ASMLocalIDsArr[PETSC_MG_MAXLEVELS];
513   PetscBool   is_last = PETSC_FALSE;
514 #if defined(PETSC_USE_INFO)
515   PetscLogDouble nnz0 = 0., nnztot = 0.;
516   MatInfo        info;
517 #endif
518 
519   PetscFunctionBegin;
520   PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
521   PetscCallMPI(MPI_Comm_rank(comm, &rank));
522   PetscCallMPI(MPI_Comm_size(comm, &size));
523   PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
524   if (pc->setupcalled) {
525     if (!pc_gamg->reuse_prol || pc->flag == DIFFERENT_NONZERO_PATTERN) {
526       /* reset everything */
527       PetscCall(PCReset_MG(pc));
528       pc->setupcalled = 0;
529     } else {
530       PC_MG_Levels **mglevels = mg->levels;
531       /* just do Galerkin grids */
532       Mat B, dA, dB;
533       if (pc_gamg->Nlevels > 1) {
534         PetscInt gl;
535         /* currently only handle case where mat and pmat are the same on coarser levels */
536         PetscCall(KSPGetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, &dA, &dB));
537         /* (re)set to get dirty flag */
538         PetscCall(KSPSetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, dA, dB));
539 
540         for (level = pc_gamg->Nlevels - 2, gl = 0; level >= 0; level--, gl++) {
541           MatReuse reuse = MAT_INITIAL_MATRIX;
542 #if defined(GAMG_STAGES)
543           PetscCall(PetscLogStagePush(gamg_stages[gl]));
544 #endif
545           /* matrix structure can change from repartitioning or process reduction but don't know if we have process reduction here. Should fix */
546           PetscCall(KSPGetOperators(mglevels[level]->smoothd, NULL, &B));
547           if (B->product) {
548             if (B->product->A == dB && B->product->B == mglevels[level + 1]->interpolate) reuse = MAT_REUSE_MATRIX;
549           }
550           if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDestroy(&mglevels[level]->A));
551           if (reuse == MAT_REUSE_MATRIX) {
552             PetscCall(PetscInfo(pc, "%s: RAP after initial setup, reuse matrix level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
553           } else {
554             PetscCall(PetscInfo(pc, "%s: RAP after initial setup, with repartitioning (new matrix) level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
555           }
556           PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
557           PetscCall(MatPtAP(dB, mglevels[level + 1]->interpolate, reuse, PETSC_DEFAULT, &B));
558           PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
559           if (reuse == MAT_INITIAL_MATRIX) mglevels[level]->A = B;
560           PetscCall(KSPSetOperators(mglevels[level]->smoothd, B, B));
561           // check for redoing eigen estimates
562           if (pc_gamg->recompute_esteig) {
563             PetscBool ischeb;
564             KSP       smoother;
565             PetscCall(PCMGGetSmoother(pc, level + 1, &smoother));
566             PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
567             if (ischeb) {
568               KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;
569               cheb->emin_provided = 0;
570               cheb->emax_provided = 0;
571             }
572             /* we could call PetscCall(KSPChebyshevSetEigenvalues(smoother, 0, 0)); but the logic does not work properly */
573           }
574           // increment: this level's operator becomes the fine-grid operator for the next pass
575           dB = B;
576 #if defined(GAMG_STAGES)
577           PetscCall(PetscLogStagePop());
578 #endif
579         }
580       }
581 
582       PetscCall(PCSetUp_MG(pc));
583       PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
584       PetscFunctionReturn(PETSC_SUCCESS);
585     }
586   }
587 
588   if (!pc_gamg->data) {
589     if (pc_gamg->orig_data) {
590       PetscCall(MatGetBlockSize(Pmat, &bs));
591       PetscCall(MatGetLocalSize(Pmat, &qq, NULL));
592 
593       pc_gamg->data_sz        = (qq / bs) * pc_gamg->orig_data_cell_rows * pc_gamg->orig_data_cell_cols;
594       pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
595       pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;
596 
597       PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
598       for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
599     } else {
600       PetscCheck(pc_gamg->ops->createdefaultdata, comm, PETSC_ERR_PLIB, "'createdefaultdata' not set(?) need to support NULL data");
601       PetscCall(pc_gamg->ops->createdefaultdata(pc, Pmat));
602     }
603   }
604 
605   /* cache original data for reuse */
606   if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
607     PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data));
608     for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
609     pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
610     pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
611   }
612 
613   /* get basic dims */
614   PetscCall(MatGetBlockSize(Pmat, &bs));
615   PetscCall(MatGetSize(Pmat, &M, NULL));
616 
617 #if defined(PETSC_USE_INFO)
618   PetscCall(MatGetInfo(Pmat, MAT_GLOBAL_SUM, &info)); /* global reduction */
619   nnz0   = info.nz_used;
620   nnztot = info.nz_used;
621 #endif
622   PetscCall(PetscInfo(pc, "%s: level %d) N=%" PetscInt_FMT ", n data rows=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", block size %d, np=%d\n", ((PetscObject)pc)->prefix, 0, M, pc_gamg->data_cell_rows, pc_gamg->data_cell_cols, (PetscInt)(nnz0 / (PetscReal)M + 0.5), (int)bs, size));
623 
624   /* Get A_i and R_i */
625   for (level = 0, Aarr[0] = Pmat, nactivepe = size; level < (pc_gamg->Nlevels - 1) && (level == 0 || M > pc_gamg->coarse_eq_limit); level++) {
626     pc_gamg->current_level = level;
627     PetscCheck(level < PETSC_MG_MAXLEVELS - 2, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Too many levels %" PetscInt_FMT, level + 1);
628     level1 = level + 1;
629 #if defined(GAMG_STAGES)
630     if (!gamg_stages[level]) {
631       char str[32];
632       PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", (int)level));
633       PetscCall(PetscLogStageRegister(str, &gamg_stages[level]));
634     }
635     PetscCall(PetscLogStagePush(gamg_stages[level]));
636 #endif
637     /* construct prolongator - Parr[level1] */
638     if (level == 0 && pc_gamg->injection_index_size > 0) {
639       Mat      Prol;
640       MatType  mtype;
641       PetscInt prol_m, prol_n, Prol_N = (M / bs) * pc_gamg->injection_index_size, Istart, Iend, nn, row;
642       PetscCall(PetscInfo(pc, "Create fine grid injection space prolongation %" PetscInt_FMT " x %" PetscInt_FMT ". %s\n", M, Prol_N, pc_gamg->data ? "delete null space data" : ""));
643       PetscCall(MatGetOwnershipRange(Pmat, &Istart, &Iend));
644       PetscCall(MatGetLocalSize(Pmat, &prol_m, NULL)); // rows m x n
645       prol_n = (prol_m / bs) * pc_gamg->injection_index_size;
646       PetscCheck(pc_gamg->injection_index_size < bs, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Injection size %d must be less than block size %d", (int)pc_gamg->injection_index_size, (int)bs);
647       PetscCall(MatGetType(Pmat, &mtype));
648       PetscCall(MatCreate(PetscObjectComm((PetscObject)pc), &Prol));
649       PetscCall(MatSetBlockSizes(Prol, bs, pc_gamg->injection_index_size));
650       PetscCall(MatSetSizes(Prol, prol_m, prol_n, M, Prol_N));
651       PetscCall(MatSetType(Prol, mtype));
652 #if PetscDefined(HAVE_DEVICE)
653       PetscBool flg;
654       PetscCall(MatBoundToCPU(Pmat, &flg));
655       PetscCall(MatBindToCPU(Prol, flg));
656       if (flg) PetscCall(MatSetBindingPropagates(Prol, PETSC_TRUE));
657 #endif
658       PetscCall(MatSeqAIJSetPreallocation(Prol, 1, NULL));
659       PetscCall(MatMPIAIJSetPreallocation(Prol, 1, NULL, 0, NULL));
660       // set I \kron [1, 1, ... ]^T
661       for (PetscInt ii = Istart, col = (Istart / bs) * pc_gamg->injection_index_size; ii < Iend; ii += bs) {
662         const PetscScalar one = 1;
663         for (PetscInt jj = 0; jj < pc_gamg->injection_index_size; jj++, col++) {
664           PetscInt row = ii + pc_gamg->injection_index[jj];
665           PetscCall(MatSetValues(Prol, 1, &row, 1, &col, &one, INSERT_VALUES));
666         }
667       }
668       PetscCall(MatAssemblyBegin(Prol, MAT_FINAL_ASSEMBLY));
669       PetscCall(MatAssemblyEnd(Prol, MAT_FINAL_ASSEMBLY));
670       PetscCall(MatViewFromOptions(Prol, NULL, "-mat_view_injection"));
671       PetscCall(MatGetBlockSizes(Prol, NULL, &cr_bs)); // column size
672       Parr[level1] = Prol;
673       // cannot deal with a null space -- with an array of 'injection cols' we could take 'injection rows' and 'injection cols' to 'data'
674       if (pc_gamg->data) {
675         pc_gamg->data_cell_cols      = pc_gamg->injection_index_size;
676         pc_gamg->data_cell_rows      = pc_gamg->injection_index_size;
677         pc_gamg->orig_data_cell_cols = 0;
678         pc_gamg->orig_data_cell_rows = 0;
679         PetscCall(PetscFree(pc_gamg->data));
680         pc_gamg->data_sz = pc_gamg->injection_index_size * prol_n;
681         PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
682         for (row = nn = 0; row < prol_n; row += pc_gamg->injection_index_size) {
683           for (int jj = 0; jj < pc_gamg->injection_index_size; jj++) {
684             int idx = row * pc_gamg->injection_index_size + jj * pc_gamg->injection_index_size;
685             for (int kk = 0; kk < pc_gamg->injection_index_size; kk++, nn++) { pc_gamg->data[idx + kk] = (jj == kk) ? 1 : 0; }
686           }
687         }
688         PetscCheck(nn == pc_gamg->data_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nn != pc_gamg->data_sz %" PetscInt_FMT " %" PetscInt_FMT, pc_gamg->data_sz, nn);
689       }
690     } else {
691       Mat               Gmat, mat;
692       PetscCoarsenData *agg_lists;
693       Mat               Prol11;
694 
695       PetscCall(PCGAMGCreateGraph(pc, Aarr[level], &Gmat));
696       PetscCall(pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists)); // Gmat may have ghosts for QR aggregates not in matrix
697       PetscCall(PetscCDGetMat(agg_lists, &mat));
698       if (!mat) PetscCall(PetscCDSetMat(agg_lists, Gmat));
699       PetscCall(pc_gamg->ops->prolongator(pc, Aarr[level], agg_lists, &Prol11));
700       /* could have failed to create new level */
701       if (Prol11) {
702         const char *prefix;
703         char        addp[32];
704 
705         /* get new block size of coarse matrices */
706         PetscCall(MatGetBlockSizes(Prol11, NULL, &cr_bs)); // column size
707 
708         if (pc_gamg->ops->optprolongator) {
709           /* smooth */
710           PetscCall(pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11));
711         }
712 
713         if (pc_gamg->use_aggs_in_asm) {
714           PetscInt bs;
715           PetscCall(MatGetBlockSizes(Prol11, &bs, NULL)); // row block size
716           PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
717           PetscCall(PetscInfo(pc, "%d: %" PetscInt_FMT " ASM local domains,  bs = %d\n", (int)level, nASMBlocksArr[level], (int)bs));
718         } else if (pc_gamg->asm_hem_aggs) {
719           MatCoarsen  crs;
720           const char *prefix;
721           PetscInt    bs;
722           PetscCall(PetscCDGetMat(agg_lists, &mat));
723           if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
724           PetscCall(PetscCDDestroy(agg_lists));
725           PetscCall(PetscInfo(pc, "HEM ASM passes = %d\n", (int)pc_gamg->asm_hem_aggs));
726           PetscCall(MatCoarsenCreate(PetscObjectComm((PetscObject)pc), &crs));
727           PetscCall(PetscObjectGetOptionsPrefix((PetscObject)pc, &prefix));
728           PetscCall(PetscObjectSetOptionsPrefix((PetscObject)crs, prefix));
729           PetscCall(MatCoarsenSetFromOptions(crs)); // get strength args
730           PetscCall(MatCoarsenSetType(crs, MATCOARSENHEM));
731           PetscCall(MatCoarsenSetMaximumIterations(crs, pc_gamg->asm_hem_aggs));
732           PetscCall(MatCoarsenSetAdjacency(crs, Gmat));
733           PetscCall(MatCoarsenSetStrictAggs(crs, PETSC_TRUE));
734           PetscCall(MatCoarsenApply(crs));
735           PetscCall(MatCoarsenViewFromOptions(crs, NULL, "-agg_hem_mat_coarsen_view"));
736           PetscCall(MatCoarsenGetData(crs, &agg_lists)); /* output */
737           PetscCall(MatCoarsenDestroy(&crs));
738           // create aggregates
739           PetscCall(MatGetBlockSizes(Aarr[level], &bs, NULL)); // row block size
740           PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
741         }
742         PetscCall(PCGetOptionsPrefix(pc, &prefix));
743         PetscCall(MatSetOptionsPrefix(Prol11, prefix));
744         PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_prolongator_%d_", (int)level));
745         PetscCall(MatAppendOptionsPrefix(Prol11, addp));
746         /* Always generate the transpose with CUDA.
747            Such behaviour can be adapted with -pc_gamg_prolongator_ prefixed options */
748         PetscCall(MatSetOption(Prol11, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
749         PetscCall(MatSetFromOptions(Prol11));
750         Parr[level1] = Prol11;
751       } else Parr[level1] = NULL; /* failed to coarsen */
752       PetscCall(PetscCDGetMat(agg_lists, &mat));
753       if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
754       PetscCall(MatDestroy(&Gmat));
755       PetscCall(PetscCDDestroy(agg_lists));
756     }                               /* construct prolongator scope */
757     if (level == 0) Aarr[0] = Pmat; /* use Pmat for finest level setup */
758     if (!Parr[level1]) {            /* failed to coarsen */
759       PetscCall(PetscInfo(pc, "%s: Stop gridding, level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
760 #if defined(GAMG_STAGES)
761       PetscCall(PetscLogStagePop());
762 #endif
763       break;
764     }
765     PetscCall(MatGetSize(Parr[level1], &M, &N)); /* N is the next M, a loop test variable */
766     PetscCheck(!is_last, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Is last ?");
767     if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
768     if (level1 == pc_gamg->Nlevels - 1) is_last = PETSC_TRUE;
769     PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));
770     PetscCall(pc_gamg->ops->createlevel(pc, Aarr[level], cr_bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last));
771     PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));
772 
773     PetscCall(MatGetSize(Aarr[level1], &M, &N)); /* M is the loop test variable */
774 #if defined(PETSC_USE_INFO)
775     PetscCall(MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info));
776     nnztot += info.nz_used;
777 #endif
778     PetscCall(PetscInfo(pc, "%s: %d) N=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", %d active pes\n", ((PetscObject)pc)->prefix, (int)level1, M, pc_gamg->data_cell_cols, (PetscInt)(info.nz_used / (PetscReal)M), nactivepe));
779 
780 #if defined(GAMG_STAGES)
781     PetscCall(PetscLogStagePop());
782 #endif
783     /* stop if one node or one proc -- could pull back for singular problems */
784     if ((pc_gamg->data_cell_cols && M / pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M / bs < 2)) {
785       PetscCall(PetscInfo(pc, "%s: HARD stop of coarsening on level %" PetscInt_FMT ".  Grid too small: %" PetscInt_FMT " block nodes\n", ((PetscObject)pc)->prefix, level, M / bs));
786       level++;
787       break;
788     }
789   } /* levels */
790   PetscCall(PetscFree(pc_gamg->data));
791 
792   PetscCall(PetscInfo(pc, "%s: %" PetscInt_FMT " levels, operator complexity = %g\n", ((PetscObject)pc)->prefix, level + 1, nnztot / nnz0));
793   pc_gamg->Nlevels = level + 1;
794   fine_level       = level;
795   PetscCall(PCMGSetLevels(pc, pc_gamg->Nlevels, NULL));
796 
797   if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */
798 
799     /* set default smoothers & set operators */
800     for (lidx = 1, level = pc_gamg->Nlevels - 2; lidx <= fine_level; lidx++, level--) {
801       KSP smoother;
802       PC  subpc;
803 
804       PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
805       PetscCall(KSPGetPC(smoother, &subpc));
806 
807       PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
808       /* set ops */
809       PetscCall(KSPSetOperators(smoother, Aarr[level], Aarr[level]));
810       PetscCall(PCMGSetInterpolation(pc, lidx, Parr[level + 1]));
811 
812       /* set defaults */
813       PetscCall(KSPSetType(smoother, KSPCHEBYSHEV));
814 
815       /* set blocks for ASM smoother that uses the 'aggregates' */
816       if (pc_gamg->use_aggs_in_asm || pc_gamg->asm_hem_aggs) {
817         PetscInt sz;
818         IS      *iss;
819 
820         sz  = nASMBlocksArr[level];
821         iss = ASMLocalIDsArr[level];
822         PetscCall(PCSetType(subpc, PCASM));
823         PetscCall(PCASMSetOverlap(subpc, 0));
824         PetscCall(PCASMSetType(subpc, PC_ASM_BASIC));
825         if (!sz) {
826           IS is;
827           PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
828           PetscCall(PCASMSetLocalSubdomains(subpc, 1, NULL, &is));
829           PetscCall(ISDestroy(&is));
830         } else {
831           PetscInt kk;
832           PetscCall(PCASMSetLocalSubdomains(subpc, sz, iss, NULL));
833           for (kk = 0; kk < sz; kk++) PetscCall(ISDestroy(&iss[kk]));
834           PetscCall(PetscFree(iss));
835         }
836         ASMLocalIDsArr[level] = NULL;
837         nASMBlocksArr[level]  = 0;
838       } else {
839         PetscCall(PCSetType(subpc, PCJACOBI));
840       }
841     }
842     {
843       /* coarse grid */
844       KSP      smoother, *k2;
845       PC       subpc, pc2;
846       PetscInt ii, first;
847       Mat      Lmat = Aarr[(level = pc_gamg->Nlevels - 1)];
848       lidx          = 0;
849 
850       PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
851       PetscCall(KSPSetOperators(smoother, Lmat, Lmat));
852       if (!pc_gamg->use_parallel_coarse_grid_solver) {
853         PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
854         PetscCall(KSPGetPC(smoother, &subpc));
855         PetscCall(PCSetType(subpc, PCBJACOBI));
856         PetscCall(PCSetUp(subpc));
857         PetscCall(PCBJacobiGetSubKSP(subpc, &ii, &first, &k2));
858         PetscCheck(ii == 1, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ii %" PetscInt_FMT " is not one", ii);
859         PetscCall(KSPGetPC(k2[0], &pc2));
860         PetscCall(PCSetType(pc2, PCLU));
861         PetscCall(PCFactorSetShiftType(pc2, MAT_SHIFT_INBLOCKS));
862         PetscCall(KSPSetTolerances(k2[0], PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 1));
863         PetscCall(KSPSetType(k2[0], KSPPREONLY));
864       }
865     }
866 
867     /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
868     PetscObjectOptionsBegin((PetscObject)pc);
869     PetscCall(PCSetFromOptions_MG(pc, PetscOptionsObject));
870     PetscOptionsEnd();
871     PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));
872 
873     /* set cheby eigen estimates from SA to use in the solver */
874     if (pc_gamg->use_sa_esteig) {
875       for (lidx = 1, level = pc_gamg->Nlevels - 2; level >= 0; lidx++, level--) {
876         KSP       smoother;
877         PetscBool ischeb;
878 
879         PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
880         PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
881         if (ischeb) {
882           KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;
883 
884           // The command line will override these settings because KSPSetFromOptions is called in PCSetUp_MG
885           if (mg->max_eigen_DinvA[level] > 0) {
886             // SA uses Jacobi for P; we use SA estimates if the smoother is also Jacobi or if the user explicitly requested it.
887             // TODO: This should test whether it's the same Jacobi variant (DIAG, ROWSUM, etc.)
888             PetscReal emax, emin;
889 
890             emin = mg->min_eigen_DinvA[level];
891             emax = mg->max_eigen_DinvA[level];
892             PetscCall(PetscInfo(pc, "%s: PCSetUp_GAMG: call KSPChebyshevSetEigenvalues on level %" PetscInt_FMT " (N=%" PetscInt_FMT ") with emax = %g emin = %g\n", ((PetscObject)pc)->prefix, level, Aarr[level]->rmap->N, (double)emax, (double)emin));
893             cheb->emin_provided = emin;
894             cheb->emax_provided = emax;
895           }
896         }
897       }
898     }
899 
900     PetscCall(PCSetUp_MG(pc));
901 
902     /* clean up */
903     for (level = 1; level < pc_gamg->Nlevels; level++) {
904       PetscCall(MatDestroy(&Parr[level]));
905       PetscCall(MatDestroy(&Aarr[level]));
906     }
907   } else {
908     KSP smoother;
909 
910     PetscCall(PetscInfo(pc, "%s: One level solver used (system is seen as DD). Using default solver.\n", ((PetscObject)pc)->prefix));
911     PetscCall(PCMGGetSmoother(pc, 0, &smoother));
912     PetscCall(KSPSetOperators(smoother, Aarr[0], Aarr[0]));
913     PetscCall(KSPSetType(smoother, KSPPREONLY));
914     PetscCall(PCSetUp_MG(pc));
915   }
916   PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
917   PetscFunctionReturn(PETSC_SUCCESS);
918 }
919 
920 /*
921  PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
922    that was created with PCCreate_GAMG().
923 
924    Input Parameter:
925 .  pc - the preconditioner context
926 
927    Application Interface Routine: PCDestroy()
928 */
929 PetscErrorCode PCDestroy_GAMG(PC pc)
930 {
931   PC_MG   *mg      = (PC_MG *)pc->data;
932   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
933 
934   PetscFunctionBegin;
935   PetscCall(PCReset_GAMG(pc));
936   if (pc_gamg->ops->destroy) PetscCall((*pc_gamg->ops->destroy)(pc));
937   PetscCall(PetscFree(pc_gamg->ops));
938   PetscCall(PetscFree(pc_gamg->gamg_type_name));
939   PetscCall(PetscFree(pc_gamg));
940   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", NULL));
941   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", NULL));
942   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", NULL));
943   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", NULL));
944   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", NULL));
945   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", NULL));
946   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", NULL));
947   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", NULL));
948   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", NULL));
949   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", NULL));
950   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", NULL));
951   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", NULL));
952   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", NULL));
953   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", NULL));
954   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", NULL));
955   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", NULL));
956   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", NULL));
957   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", NULL));
958   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndices_C", NULL));
959   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", NULL));
960   PetscCall(PCDestroy_MG(pc));
961   PetscFunctionReturn(PETSC_SUCCESS);
962 }
963 
964 /*@
965   PCGAMGSetProcEqLim - Set number of equations to aim for per process on the coarse grids via processor reduction in `PCGAMG`
966 
967   Logically Collective
968 
969   Input Parameters:
970 + pc - the preconditioner context
971 - n  - the number of equations
972 
973   Options Database Key:
974 . -pc_gamg_process_eq_limit <limit> - set the limit
975 
976   Level: intermediate
977 
978   Note:
979   `PCGAMG` will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
980   that has degrees of freedom
981 
982 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`
983 @*/
984 PetscErrorCode PCGAMGSetProcEqLim(PC pc, PetscInt n)
985 {
986   PetscFunctionBegin;
987   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
988   PetscTryMethod(pc, "PCGAMGSetProcEqLim_C", (PC, PetscInt), (pc, n));
989   PetscFunctionReturn(PETSC_SUCCESS);
990 }
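/*
  Example usage (a minimal sketch with illustrative values, assuming pc already has type PCGAMG):
  set the per-process equation target together with the companion coarsest-grid limit defined
  further below in this file.

    PetscCall(PCGAMGSetProcEqLim(pc, 1000));  // aim for ~1000 equations per active process
    PetscCall(PCGAMGSetCoarseEqLim(pc, 200)); // stop coarsening below ~200 equations in total
*/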
991 
992 static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
993 {
994   PC_MG   *mg      = (PC_MG *)pc->data;
995   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
996 
997   PetscFunctionBegin;
998   if (n > 0) pc_gamg->min_eq_proc = n;
999   PetscFunctionReturn(PETSC_SUCCESS);
1000 }
1001 
1002 /*@
1003   PCGAMGSetCoarseEqLim - Set maximum number of equations on the coarsest grid of `PCGAMG`
1004 
1005   Collective
1006 
1007   Input Parameters:
1008 + pc - the preconditioner context
1009 - n  - maximum number of equations to aim for
1010 
1011   Options Database Key:
1012 . -pc_gamg_coarse_eq_limit <limit> - set the limit
1013 
1014   Level: intermediate
1015 
1016   Note:
1017   For example, -pc_gamg_coarse_eq_limit 1000 will stop coarsening once the coarse grid
1018   has fewer than 1000 unknowns.
1019 
1020 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`
1021 @*/
1022 PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
1023 {
1024   PetscFunctionBegin;
1025   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1026   PetscTryMethod(pc, "PCGAMGSetCoarseEqLim_C", (PC, PetscInt), (pc, n));
1027   PetscFunctionReturn(PETSC_SUCCESS);
1028 }
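/*
  Example usage (a sketch of the options-database route, equivalent to the API calls above;
  "app" is a placeholder for the application executable):

    ./app -pc_type gamg -pc_gamg_process_eq_limit 1000 -pc_gamg_coarse_eq_limit 200
*/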
1029 
1030 static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
1031 {
1032   PC_MG   *mg      = (PC_MG *)pc->data;
1033   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1034 
1035   PetscFunctionBegin;
1036   if (n > 0) pc_gamg->coarse_eq_limit = n;
1037   PetscFunctionReturn(PETSC_SUCCESS);
1038 }
1039 
1040 /*@
1041   PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids when reducing the number of MPI ranks to use
1042 
1043   Collective
1044 
1045   Input Parameters:
1046 + pc - the preconditioner context
1047 - n  - `PETSC_TRUE` or `PETSC_FALSE`
1048 
1049   Options Database Key:
1050 . -pc_gamg_repartition <true,false> - turn on the repartitioning
1051 
1052   Level: intermediate
1053 
1054   Note:
1055   This will generally improve the load balancing of the work on each level
1056 
1057 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`
1058 @*/
1059 PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
1060 {
1061   PetscFunctionBegin;
1062   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1063   PetscTryMethod(pc, "PCGAMGSetRepartition_C", (PC, PetscBool), (pc, n));
1064   PetscFunctionReturn(PETSC_SUCCESS);
1065 }
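/*
  Example usage (a minimal sketch, assuming pc is a PCGAMG preconditioner): enable repartitioning
  of the coarse grids when the number of active processes is reduced; equivalent to passing
  -pc_gamg_repartition true on the command line.

    PetscCall(PCGAMGSetRepartition(pc, PETSC_TRUE));
*/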
1066 
1067 static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
1068 {
1069   PC_MG   *mg      = (PC_MG *)pc->data;
1070   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1071 
1072   PetscFunctionBegin;
1073   pc_gamg->repart = n;
1074   PetscFunctionReturn(PETSC_SUCCESS);
1075 }
1076 
1077 /*@
1078   PCGAMGSetUseSAEstEig - Use the eigen estimate from smoothed aggregation for the Chebyshev smoother during the solution process
1079 
1080   Collective
1081 
1082   Input Parameters:
1083 + pc - the preconditioner context
1084 - b  - flag
1085 
1086   Options Database Key:
1087 . -pc_gamg_use_sa_esteig <true,false> - use the eigen estimate
1088 
1089   Level: advanced
1090 
1091   Notes:
1092   Smoothed aggregation constructs the smoothed prolongator $P = (I - \omega D^{-1} A) T$ where $T$ is the tentative prolongator and $D$ is the diagonal of $A$.
1093   Eigenvalue estimates (based on a few `KSPCG` or `KSPGMRES` iterations) are computed to choose $\omega$ so that this is a stable smoothing operation.
1094   If Chebyshev with Jacobi (diagonal) preconditioning is used for smoothing, then the eigenvalue estimates can be reused during the solution process.
1095   This option is only used when the smoother uses Jacobi, and should be turned off if a different `PCJacobiType` is used.
1096   It became the default in PETSc 3.17.
1097 
1098 .seealso: [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`, `PCGAMGSetRecomputeEstEig()`
1099 @*/
1100 PetscErrorCode PCGAMGSetUseSAEstEig(PC pc, PetscBool b)
1101 {
1102   PetscFunctionBegin;
1103   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1104   PetscTryMethod(pc, "PCGAMGSetUseSAEstEig_C", (PC, PetscBool), (pc, b));
1105   PetscFunctionReturn(PETSC_SUCCESS);
1106 }
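/*
  Example usage (a minimal sketch): turn off reuse of the smoothed-aggregation eigenvalue
  estimates, for instance when the Chebyshev smoother is not preconditioned by plain Jacobi;
  equivalent to -pc_gamg_use_sa_esteig false.

    PetscCall(PCGAMGSetUseSAEstEig(pc, PETSC_FALSE));
*/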
1107 
1108 static PetscErrorCode PCGAMGSetUseSAEstEig_GAMG(PC pc, PetscBool b)
1109 {
1110   PC_MG   *mg      = (PC_MG *)pc->data;
1111   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1112 
1113   PetscFunctionBegin;
1114   pc_gamg->use_sa_esteig = b;
1115   PetscFunctionReturn(PETSC_SUCCESS);
1116 }
1117 
1118 /*@
1119   PCGAMGSetRecomputeEstEig - Set flag for Chebyshev smoothers to recompute the eigenvalue estimates when the operators are rebuilt
1120 
1121   Collective
1122 
1123   Input Parameters:
1124 + pc - the preconditioner context
1125 - b  - flag
1126 
1127   Options Database Key:
1128 . -pc_gamg_recompute_esteig <true,false> - recompute the eigenvalue estimates
1129 
1130   Level: advanced
1131 
1132 .seealso: [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`
1133 @*/
1134 PetscErrorCode PCGAMGSetRecomputeEstEig(PC pc, PetscBool b)
1135 {
1136   PetscFunctionBegin;
1137   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1138   PetscTryMethod(pc, "PCGAMGSetRecomputeEstEig_C", (PC, PetscBool), (pc, b));
1139   PetscFunctionReturn(PETSC_SUCCESS);
1140 }
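/*
  Example usage (a minimal sketch): recompute the Chebyshev eigenvalue estimates whenever the
  operators are rebuilt with reused interpolation (see PCGAMGSetReuseInterpolation() below);
  equivalent to -pc_gamg_recompute_esteig true.

    PetscCall(PCGAMGSetRecomputeEstEig(pc, PETSC_TRUE));
*/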
1141 
1142 static PetscErrorCode PCGAMGSetRecomputeEstEig_GAMG(PC pc, PetscBool b)
1143 {
1144   PC_MG   *mg      = (PC_MG *)pc->data;
1145   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1146 
1147   PetscFunctionBegin;
1148   pc_gamg->recompute_esteig = b;
1149   PetscFunctionReturn(PETSC_SUCCESS);
1150 }
1151 
1152 /*@
1153   PCGAMGSetEigenvalues - Set user-provided minimum and maximum eigenvalue estimates to be used by `PCGAMG` instead of computing them internally
1154 
1155   Collective
1156 
1157   Input Parameters:
1158 + pc   - the preconditioner context
1159 . emax - max eigenvalue
1160 - emin - min eigenvalue
1161 
1162   Options Database Key:
1163 . -pc_gamg_eigenvalues <emin,emax> - estimates of the eigenvalues
1164 
1165   Level: intermediate
1166 
1167 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetUseSAEstEig()`
1168 @*/
1169 PetscErrorCode PCGAMGSetEigenvalues(PC pc, PetscReal emax, PetscReal emin)
1170 {
1171   PetscFunctionBegin;
1172   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1173   PetscTryMethod(pc, "PCGAMGSetEigenvalues_C", (PC, PetscReal, PetscReal), (pc, emax, emin));
1174   PetscFunctionReturn(PETSC_SUCCESS);
1175 }
1176 
1177 static PetscErrorCode PCGAMGSetEigenvalues_GAMG(PC pc, PetscReal emax, PetscReal emin)
1178 {
1179   PC_MG   *mg      = (PC_MG *)pc->data;
1180   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1181 
1182   PetscFunctionBegin;
1183   PetscCheck(emax > emin, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Maximum eigenvalue must be larger than minimum: max %g min %g", (double)emax, (double)emin);
1184   PetscCheck(emax * emin > 0.0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Both eigenvalues must be of the same sign: max %g min %g", (double)emax, (double)emin);
1185   pc_gamg->emax = emax;
1186   pc_gamg->emin = emin;
1187   PetscFunctionReturn(PETSC_SUCCESS);
1188 }
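
/*
  Illustrative sketch (not part of this file): supplying eigenvalue estimates directly instead of letting
  PCGAMG compute them; note the argument order is (emax, emin). The numbers below are placeholders, and
  `ksp` is an assumed existing KSP configured with PCGAMG.

    PC pc;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetEigenvalues(pc, 2.0, 0.05)); // emax = 2.0, emin = 0.05; same as -pc_gamg_eigenvalues 0.05,2.0
*/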
1189 
1190 /*@
1191   PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding a `PCGAMG` algebraic multigrid preconditioner
1192 
1193   Collective
1194 
1195   Input Parameters:
1196 + pc - the preconditioner context
1197 - n  - `PETSC_TRUE` or `PETSC_FALSE`
1198 
1199   Options Database Key:
1200 . -pc_gamg_reuse_interpolation <true,false> - reuse the previous interpolation
1201 
1202   Level: intermediate
1203 
1204   Note:
1205   May negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
1206   rebuilding the preconditioner quicker.
1207 
1208 .seealso: [](ch_ksp), `PCGAMG`
1209 @*/
1210 PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
1211 {
1212   PetscFunctionBegin;
1213   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1214   PetscTryMethod(pc, "PCGAMGSetReuseInterpolation_C", (PC, PetscBool), (pc, n));
1215   PetscFunctionReturn(PETSC_SUCCESS);
1216 }
1217 
1218 static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
1219 {
1220   PC_MG   *mg      = (PC_MG *)pc->data;
1221   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1222 
1223   PetscFunctionBegin;
1224   pc_gamg->reuse_prol = n;
1225   PetscFunctionReturn(PETSC_SUCCESS);
1226 }
1227 
1228 /*@
1229   PCGAMGASMSetUseAggs - Have the `PCGAMG` smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner
1230   used as the smoother
1231 
1232   Collective
1233 
1234   Input Parameters:
1235 + pc  - the preconditioner context
1236 - flg - `PETSC_TRUE` to use aggregates, `PETSC_FALSE` to not
1237 
1238   Options Database Key:
1239 . -pc_gamg_asm_use_agg <true,false> - use aggregates to define the additive Schwarz subdomains
1240 
1241   Level: intermediate
1242 
1243 .seealso: [](ch_ksp), `PCGAMG`, `PCASM`, `PCSetType()`
1244 @*/
1245 PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
1246 {
1247   PetscFunctionBegin;
1248   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1249   PetscTryMethod(pc, "PCGAMGASMSetUseAggs_C", (PC, PetscBool), (pc, flg));
1250   PetscFunctionReturn(PETSC_SUCCESS);
1251 }
1252 
1253 static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
1254 {
1255   PC_MG   *mg      = (PC_MG *)pc->data;
1256   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1257 
1258   PetscFunctionBegin;
1259   pc_gamg->use_aggs_in_asm = flg;
1260   PetscFunctionReturn(PETSC_SUCCESS);
1261 }
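
/*
  Illustrative sketch (not part of this file): using the coarsening aggregates as the PCASM subdomains.
  The level smoothers themselves are typically switched to PCASM via options (e.g. -mg_levels_pc_type asm);
  `ksp` is an assumed existing KSP configured with PCGAMG.

    PC pc;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGASMSetUseAggs(pc, PETSC_TRUE)); // equivalent to -pc_gamg_asm_use_agg true
*/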
1262 
1263 /*@
1264   PCGAMGSetParallelCoarseGridSolve - allow a parallel coarse grid solver
1265 
1266   Collective
1267 
1268   Input Parameters:
1269 + pc  - the preconditioner context
1270 - flg - `PETSC_TRUE` to not force coarse grid onto one processor
1271 
1272   Options Database Key:
1273 . -pc_gamg_parallel_coarse_grid_solver - use a parallel coarse grid direct solver
1274 
1275   Level: intermediate
1276 
1277 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetCpuPinCoarseGrids()`
1278 @*/
1279 PetscErrorCode PCGAMGSetParallelCoarseGridSolve(PC pc, PetscBool flg)
1280 {
1281   PetscFunctionBegin;
1282   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1283   PetscTryMethod(pc, "PCGAMGSetParallelCoarseGridSolve_C", (PC, PetscBool), (pc, flg));
1284   PetscFunctionReturn(PETSC_SUCCESS);
1285 }
1286 
1287 static PetscErrorCode PCGAMGSetParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
1288 {
1289   PC_MG   *mg      = (PC_MG *)pc->data;
1290   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1291 
1292   PetscFunctionBegin;
1293   pc_gamg->use_parallel_coarse_grid_solver = flg;
1294   PetscFunctionReturn(PETSC_SUCCESS);
1295 }
1296 
1297 /*@
1298   PCGAMGSetCpuPinCoarseGrids - pin the coarse grids created in `PCGAMG` to run only on the CPU since the problems may be too small to run efficiently on the GPUs
1299 
1300   Collective
1301 
1302   Input Parameters:
1303 + pc  - the preconditioner context
1304 - flg - `PETSC_TRUE` to pin coarse grids to the CPU
1305 
1306   Options Database Key:
1307 . -pc_gamg_cpu_pin_coarse_grids - pin the coarse grids to the CPU
1308 
1309   Level: intermediate
1310 
1311 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetParallelCoarseGridSolve()`
1312 @*/
1313 PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
1314 {
1315   PetscFunctionBegin;
1316   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1317   PetscTryMethod(pc, "PCGAMGSetCpuPinCoarseGrids_C", (PC, PetscBool), (pc, flg));
1318   PetscFunctionReturn(PETSC_SUCCESS);
1319 }
1320 
1321 static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
1322 {
1323   PC_MG   *mg      = (PC_MG *)pc->data;
1324   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1325 
1326   PetscFunctionBegin;
1327   pc_gamg->cpu_pin_coarse_grids = flg;
1328   PetscFunctionReturn(PETSC_SUCCESS);
1329 }
1330 
1331 /*@
1332   PCGAMGSetCoarseGridLayoutType - set how reduced coarse grids are laid out on the processes: compact places them on processes in natural order, spread distributes them across the whole machine
1333 
1334   Collective
1335 
1336   Input Parameters:
1337 + pc  - the preconditioner context
1338 - flg - `PCGAMGLayoutType` type, either `PCGAMG_LAYOUT_COMPACT` or `PCGAMG_LAYOUT_SPREAD`
1339 
1340   Options Database Key:
1341 . -pc_gamg_coarse_grid_layout_type <compact,spread> - compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth
1342 
1343   Level: intermediate
1344 
1345 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetCpuPinCoarseGrids()`, `PCGAMGLayoutType`, `PCGAMG_LAYOUT_COMPACT`, `PCGAMG_LAYOUT_SPREAD`
1346 @*/
1347 PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
1348 {
1349   PetscFunctionBegin;
1350   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1351   PetscTryMethod(pc, "PCGAMGSetCoarseGridLayoutType_C", (PC, PCGAMGLayoutType), (pc, flg));
1352   PetscFunctionReturn(PETSC_SUCCESS);
1353 }
1354 
1355 static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
1356 {
1357   PC_MG   *mg      = (PC_MG *)pc->data;
1358   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1359 
1360   PetscFunctionBegin;
1361   pc_gamg->layout_type = flg;
1362   PetscFunctionReturn(PETSC_SUCCESS);
1363 }
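
/*
  Illustrative sketch (not part of this file): choosing the spread layout so reduced coarse grids are
  distributed across the machine for more aggregate memory bandwidth; `ksp` is an assumed existing KSP
  configured with PCGAMG.

    PC pc;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetCoarseGridLayoutType(pc, PCGAMG_LAYOUT_SPREAD)); // or PCGAMG_LAYOUT_COMPACT
*/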
1364 
1365 /*@
1366   PCGAMGSetNlevels - Sets the maximum number of levels `PCGAMG` will use
1367 
1368   Collective
1369 
1370   Input Parameters:
1371 + pc - the preconditioner
1372 - n  - the maximum number of levels to use
1373 
1374   Options Database Key:
1375 . -pc_mg_levels <n> - set the maximum number of levels to allow
1376 
1377   Level: intermediate
1378 
1379   Developer Notes:
1380   Should be called `PCGAMGSetMaximumNumberlevels()` and possibly be shared with `PCMG`
1381 
1382 .seealso: [](ch_ksp), `PCGAMG`
1383 @*/
1384 PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
1385 {
1386   PetscFunctionBegin;
1387   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1388   PetscTryMethod(pc, "PCGAMGSetNlevels_C", (PC, PetscInt), (pc, n));
1389   PetscFunctionReturn(PETSC_SUCCESS);
1390 }
1391 
1392 static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
1393 {
1394   PC_MG   *mg      = (PC_MG *)pc->data;
1395   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1396 
1397   PetscFunctionBegin;
1398   pc_gamg->Nlevels = n;
1399   PetscFunctionReturn(PETSC_SUCCESS);
1400 }
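
/*
  Illustrative sketch (not part of this file): capping the multigrid hierarchy at a chosen depth; `ksp`
  is an assumed existing KSP configured with PCGAMG, and 4 is an arbitrary example value.

    PC pc;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetNlevels(pc, 4)); // equivalent to -pc_mg_levels 4
*/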
1401 
1402 /*@
1403   PCGAMGASMSetHEM - Sets the number of heavy edge matching (HEM) passes used to construct the aggregates for the `PCASM` subdomains
1404 
1405   Collective
1406 
1407   Input Parameters:
1408 + pc - the preconditioner
1409 - n  - number of HEM matching passes used to construct the ASM subdomains
1410 
1411   Options Database Key:
1412 . -pc_gamg_asm_hem_aggs <n> - set the number of HEM matching passes
1413 
1414   Level: intermediate
1418 
1419 .seealso: [](ch_ksp), `PCGAMG`
1420 @*/
1421 PetscErrorCode PCGAMGASMSetHEM(PC pc, PetscInt n)
1422 {
1423   PetscFunctionBegin;
1424   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1425   PetscTryMethod(pc, "PCGAMGASMSetHEM_C", (PC, PetscInt), (pc, n));
1426   PetscFunctionReturn(PETSC_SUCCESS);
1427 }
1428 
1429 static PetscErrorCode PCGAMGASMSetHEM_GAMG(PC pc, PetscInt n)
1430 {
1431   PC_MG   *mg      = (PC_MG *)pc->data;
1432   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1433 
1434   PetscFunctionBegin;
1435   pc_gamg->asm_hem_aggs = n;
1436   PetscFunctionReturn(PETSC_SUCCESS);
1437 }
1438 
1439 /*@
1440   PCGAMGSetThreshold - Relative threshold to use for dropping edges in aggregation graph
1441 
1442   Not Collective
1443 
1444   Input Parameters:
1445 + pc - the preconditioner context
1446 . v  - array of threshold values for finest n levels; 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
1447 - n  - number of threshold values provided in array
1448 
1449   Options Database Key:
1450 . -pc_gamg_threshold <threshold> - the threshold to drop edges
1451 
1452   Level: intermediate
1453 
1454   Notes:
1455   Increasing the threshold decreases the rate of coarsening. Conversely, reducing the threshold increases the rate of coarsening (aggressive coarsening), thereby reducing the complexity of the coarse grids, which generally results in slower solver convergence rates. Reducing coarse grid complexity also reduces the cost of the Galerkin coarse grid construction considerably.
1456   Before coarsening or aggregating the graph, `PCGAMG` removes small values from the graph with this threshold, thus reducing the coupling in the graph and producing a different (perhaps better) set of coarse points.
1457 
1458   If `n` is less than the total number of coarsenings (see `PCGAMGSetNlevels()`), then threshold scaling (see `PCGAMGSetThresholdScale()`) is used for each successive coarsening.
1459   In this case, `PCGAMGSetThresholdScale()` must be called before `PCGAMGSetThreshold()`.
1460   If `n` is greater than the total number of levels, the excess entries in threshold will not be used.
1461 
1462 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetAggressiveLevels()`, `PCGAMGMISkSetAggressive()`, `PCGAMGSetMinDegreeOrderingMISk()`, `PCGAMGSetThresholdScale()`
1463 @*/
1464 PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
1465 {
1466   PetscFunctionBegin;
1467   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1468   if (n) PetscAssertPointer(v, 2);
1469   PetscTryMethod(pc, "PCGAMGSetThreshold_C", (PC, PetscReal[], PetscInt), (pc, v, n));
1470   PetscFunctionReturn(PETSC_SUCCESS);
1471 }
1472 
1473 static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
1474 {
1475   PC_MG   *mg      = (PC_MG *)pc->data;
1476   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1477   PetscInt i;
1478   PetscFunctionBegin;
1479   for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->threshold[i] = v[i];
1480   for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale;
1481   PetscFunctionReturn(PETSC_SUCCESS);
1482 }
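
/*
  Illustrative sketch (not part of this file): dropping weakly coupled edges with per-level thresholds.
  Entries beyond those given are filled in using the threshold scale factor (see PCGAMGSetThresholdScale()).
  The numbers are placeholders and `ksp` is an assumed existing KSP configured with PCGAMG.

    PC        pc;
    PetscReal th[2] = {0.01, 0.005}; // finest level, then next coarser level

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetThreshold(pc, th, 2)); // equivalent to -pc_gamg_threshold 0.01,0.005
*/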
1483 
1484 /*@
1485   PCGAMGSetRankReductionFactors - Set a manual schedule for MPI rank reduction on coarse grids
1486 
1487   Collective
1488 
1489   Input Parameters:
1490 + pc - the preconditioner context
1491 . v  - array of reduction factors; a value of 0 for the first entry forces a reduction to one process/device on the first level when running with CUDA
1492 - n  - number of values provided in array
1493 
1494   Options Database Key:
1495 . -pc_gamg_rank_reduction_factors <factors> - provide the schedule
1496 
1497   Level: intermediate
1498 
1499 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetCoarseEqLim()`
1500 @*/
1501 PetscErrorCode PCGAMGSetRankReductionFactors(PC pc, PetscInt v[], PetscInt n)
1502 {
1503   PetscFunctionBegin;
1504   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1505   if (n) PetscAssertPointer(v, 2);
1506   PetscTryMethod(pc, "PCGAMGSetRankReductionFactors_C", (PC, PetscInt[], PetscInt), (pc, v, n));
1507   PetscFunctionReturn(PETSC_SUCCESS);
1508 }
1509 
1510 static PetscErrorCode PCGAMGSetRankReductionFactors_GAMG(PC pc, PetscInt v[], PetscInt n)
1511 {
1512   PC_MG   *mg      = (PC_MG *)pc->data;
1513   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1514   PetscInt i;
1515   PetscFunctionBegin;
1516   for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->level_reduction_factors[i] = v[i];
1517   for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->level_reduction_factors[i] = -1; /* unspecified entries get -1: use the internal heuristics (0 would force one process/device on the first level) */
1518   PetscFunctionReturn(PETSC_SUCCESS);
1519 }
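
/*
  Illustrative sketch (not part of this file): a manual rank-reduction schedule that overrides the
  internal heuristics, e.g. cut the number of active ranks by 4 at the first reduction and by 2 at the
  next. The factors are placeholders and `ksp` is an assumed existing KSP configured with PCGAMG.

    PC       pc;
    PetscInt factors[2] = {4, 2};

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetRankReductionFactors(pc, factors, 2)); // equivalent to -pc_gamg_rank_reduction_factors 4,2
*/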
1520 
1521 /*@
1522   PCGAMGSetThresholdScale - Relative threshold reduction at each level
1523 
1524   Not Collective
1525 
1526   Input Parameters:
1527 + pc - the preconditioner context
1528 - v  - the threshold value reduction, usually < 1.0
1529 
1530   Options Database Key:
1531 . -pc_gamg_threshold_scale <v> - set the relative threshold reduction on each level
1532 
1533   Level: advanced
1534 
1535   Note:
1536   The initial threshold (for an arbitrary number of levels starting from the finest) can be set with `PCGAMGSetThreshold()`.
1537   This scaling is applied to each subsequent coarsening, but `PCGAMGSetThresholdScale()` must be called before `PCGAMGSetThreshold()`.
1538 
1539 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetThreshold()`
1540 @*/
1541 PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
1542 {
1543   PetscFunctionBegin;
1544   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1545   PetscTryMethod(pc, "PCGAMGSetThresholdScale_C", (PC, PetscReal), (pc, v));
1546   PetscFunctionReturn(PETSC_SUCCESS);
1547 }
1548 
1549 static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
1550 {
1551   PC_MG   *mg      = (PC_MG *)pc->data;
1552   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1553   PetscFunctionBegin;
1554   pc_gamg->threshold_scale = v;
1555   PetscFunctionReturn(PETSC_SUCCESS);
1556 }
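
/*
  Illustrative sketch (not part of this file): setting a threshold scale and then the finest-level
  threshold; PCGAMGSetThresholdScale() must be called first so the scaling is used for the levels whose
  threshold is not given explicitly. The values are placeholders and `ksp` is an assumed existing KSP.

    PC        pc;
    PetscReal th0 = 0.02;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetThresholdScale(pc, 0.5)); // each coarser level uses half the previous threshold
    PetscCall(PCGAMGSetThreshold(pc, &th0, 1));
*/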
1557 
1558 /*@C
1559   PCGAMGSetType - Set the type of algorithm `PCGAMG` should use
1560 
1561   Collective
1562 
1563   Input Parameters:
1564 + pc   - the preconditioner context
1565 - type - `PCGAMGAGG`, `PCGAMGGEO`, or `PCGAMGCLASSICAL`
1566 
1567   Options Database Key:
1568 . -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply - only agg supported
1569 
1570   Level: intermediate
1571 
1572 .seealso: [](ch_ksp), `PCGAMGGetType()`, `PCGAMG`, `PCGAMGType`
1573 @*/
1574 PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
1575 {
1576   PetscFunctionBegin;
1577   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1578   PetscTryMethod(pc, "PCGAMGSetType_C", (PC, PCGAMGType), (pc, type));
1579   PetscFunctionReturn(PETSC_SUCCESS);
1580 }
1581 
1582 /*@C
1583   PCGAMGGetType - Get the type of algorithm `PCGAMG` will use
1584 
1585   Collective
1586 
1587   Input Parameter:
1588 . pc - the preconditioner context
1589 
1590   Output Parameter:
1591 . type - the type of algorithm used
1592 
1593   Level: intermediate
1594 
1595 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetType()`, `PCGAMGType`
1596 @*/
1597 PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
1598 {
1599   PetscFunctionBegin;
1600   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1601   PetscUseMethod(pc, "PCGAMGGetType_C", (PC, PCGAMGType *), (pc, type));
1602   PetscFunctionReturn(PETSC_SUCCESS);
1603 }
1604 
1605 static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
1606 {
1607   PC_MG   *mg      = (PC_MG *)pc->data;
1608   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1609 
1610   PetscFunctionBegin;
1611   *type = pc_gamg->type;
1612   PetscFunctionReturn(PETSC_SUCCESS);
1613 }
1614 
1615 static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
1616 {
1617   PC_MG   *mg      = (PC_MG *)pc->data;
1618   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1619   PetscErrorCode (*r)(PC);
1620 
1621   PetscFunctionBegin;
1622   pc_gamg->type = type;
1623   PetscCall(PetscFunctionListFind(GAMGList, type, &r));
1624   PetscCheck(r, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown GAMG type %s given", type);
1625   if (pc_gamg->ops->destroy) {
1626     PetscCall((*pc_gamg->ops->destroy)(pc));
1627     PetscCall(PetscMemzero(pc_gamg->ops, sizeof(struct _PCGAMGOps)));
1628     pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1629     /* cleaning up common data in pc_gamg - this should disappear someday */
1630     pc_gamg->data_cell_cols      = 0;
1631     pc_gamg->data_cell_rows      = 0;
1632     pc_gamg->orig_data_cell_cols = 0;
1633     pc_gamg->orig_data_cell_rows = 0;
1634     PetscCall(PetscFree(pc_gamg->data));
1635     pc_gamg->data_sz = 0;
1636   }
1637   PetscCall(PetscFree(pc_gamg->gamg_type_name));
1638   PetscCall(PetscStrallocpy(type, &pc_gamg->gamg_type_name));
1639   PetscCall((*r)(pc));
1640   PetscFunctionReturn(PETSC_SUCCESS);
1641 }
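
/*
  Illustrative sketch (not part of this file): selecting the aggregation-based algorithm programmatically
  and reading it back; `ksp` is an assumed existing KSP configured with PCGAMG.

    PC         pc;
    PCGAMGType gtype;

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetType(pc, PCGAMGAGG)); // equivalent to -pc_gamg_type agg
    PetscCall(PCGAMGGetType(pc, &gtype));
*/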
1642 
1643 static PetscErrorCode PCView_GAMG(PC pc, PetscViewer viewer)
1644 {
1645   PC_MG    *mg      = (PC_MG *)pc->data;
1646   PC_GAMG  *pc_gamg = (PC_GAMG *)mg->innerctx;
1647   PetscReal gc = 0, oc = 0;
1648 
1649   PetscFunctionBegin;
1650   PetscCall(PetscViewerASCIIPrintf(viewer, "    GAMG specific options\n"));
1651   PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold for dropping small values in graph on each level ="));
1652   for (PetscInt i = 0; i < mg->nlevels; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %g", (double)pc_gamg->threshold[i]));
1653   PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
1654   PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold scaling factor for each level not specified = %g\n", (double)pc_gamg->threshold_scale));
1655   if (pc_gamg->use_aggs_in_asm) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates from coarsening process to define subdomains for PCASM\n")); // this takes precedence
1656   else if (pc_gamg->asm_hem_aggs) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates made with %d applications of heavy edge matching (HEM) to define subdomains for PCASM\n", (int)pc_gamg->asm_hem_aggs));
1657   if (pc_gamg->use_parallel_coarse_grid_solver) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n"));
1658   if (pc_gamg->injection_index_size) {
1659     PetscCall(PetscViewerASCIIPrintf(viewer, "      Using injection restriction/prolongation on first level, dofs:"));
1660     for (int i = 0; i < pc_gamg->injection_index_size; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %d", (int)pc_gamg->injection_index[i]));
1661     PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
1662   }
1663   if (pc_gamg->ops->view) PetscCall((*pc_gamg->ops->view)(pc, viewer));
1664   PetscCall(PCMGGetGridComplexity(pc, &gc, &oc));
1665   PetscCall(PetscViewerASCIIPrintf(viewer, "      Complexity:    grid = %g    operator = %g\n", (double)gc, (double)oc));
1666   PetscFunctionReturn(PETSC_SUCCESS);
1667 }
1668 
1669 /*@
1670   PCGAMGSetInjectionIndex - Array of subset of variables per vertex to inject into coarse grid space
1671 
1672   Logically Collective
1673 
1674   Input Parameters:
1675 + pc  - the coarsen context
1676 . n   - number of indices
1677 - idx - array of indices
1678 
1679   Options Database Key:
1680 . -pc_gamg_injection_index - array of subset of variables per vertex to use for injection coarse grid space
1681 
1682   Level: intermediate
1683 
1684 .seealso: `PCGAMG`
1685 @*/
1686 PetscErrorCode PCGAMGSetInjectionIndex(PC pc, PetscInt n, PetscInt idx[])
1687 {
1688   PetscFunctionBegin;
1689   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1690   PetscValidLogicalCollectiveInt(pc, n, 2);
1691   PetscTryMethod(pc, "PCGAMGSetInjectionIndex_C", (PC, PetscInt, PetscInt[]), (pc, n, idx));
1692   PetscFunctionReturn(PETSC_SUCCESS);
1693 }
1694 
1695 static PetscErrorCode PCGAMGSetInjectionIndex_GAMG(PC pc, PetscInt n, PetscInt idx[])
1696 {
1697   PC_MG   *mg      = (PC_MG *)pc->data;
1698   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1699   PetscFunctionBegin;
1700   pc_gamg->injection_index_size = n;
1701   PetscCheck(n < MAT_COARSEN_STRENGTH_INDEX_SIZE, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "array size %d larger than max %d", (int)n, MAT_COARSEN_STRENGTH_INDEX_SIZE);
1702   for (int iii = 0; iii < n; iii++) pc_gamg->injection_index[iii] = idx[iii];
1703   PetscFunctionReturn(PETSC_SUCCESS);
1704 }
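
/*
  Illustrative sketch (not part of this file): restricting the first coarse space to a subset of the
  variables at each vertex, e.g. dofs 0 and 1 of a multi-component problem. The indices are placeholders
  and `ksp` is an assumed existing KSP configured with PCGAMG.

    PC       pc;
    PetscInt idx[2] = {0, 1};

    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCGAMGSetInjectionIndex(pc, 2, idx)); // equivalent to -pc_gamg_injection_index 0,1
*/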
1705 
1706 static PetscErrorCode PCSetFromOptions_GAMG(PC pc, PetscOptionItems *PetscOptionsObject)
1707 {
1708   PC_MG             *mg      = (PC_MG *)pc->data;
1709   PC_GAMG           *pc_gamg = (PC_GAMG *)mg->innerctx;
1710   PetscBool          flag;
1711   MPI_Comm           comm;
1712   char               prefix[256], tname[32];
1713   PetscInt           i, n;
1714   const char        *pcpre;
1715   static const char *LayoutTypes[] = {"compact", "spread", "PCGAMGLayoutType", "PC_GAMG_LAYOUT", NULL};
1716 
1717   PetscFunctionBegin;
1718   PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
1719   PetscOptionsHeadBegin(PetscOptionsObject, "GAMG options");
1720   PetscCall(PetscOptionsFList("-pc_gamg_type", "Type of AMG method (only 'agg' supported and useful)", "PCGAMGSetType", GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag));
1721   if (flag) PetscCall(PCGAMGSetType(pc, tname));
1722   PetscCall(PetscOptionsBool("-pc_gamg_repartition", "Repartion coarse grids", "PCGAMGSetRepartition", pc_gamg->repart, &pc_gamg->repart, NULL));
1723   PetscCall(PetscOptionsBool("-pc_gamg_use_sa_esteig", "Use eigen estimate from smoothed aggregation for smoother", "PCGAMGSetUseSAEstEig", pc_gamg->use_sa_esteig, &pc_gamg->use_sa_esteig, NULL));
1724   PetscCall(PetscOptionsBool("-pc_gamg_recompute_esteig", "Set flag to recompute eigen estimates for Chebyshev when matrix changes", "PCGAMGSetRecomputeEstEig", pc_gamg->recompute_esteig, &pc_gamg->recompute_esteig, NULL));
1725   PetscCall(PetscOptionsBool("-pc_gamg_reuse_interpolation", "Reuse prolongation operator", "PCGAMGReuseInterpolation", pc_gamg->reuse_prol, &pc_gamg->reuse_prol, NULL));
1726   PetscCall(PetscOptionsBool("-pc_gamg_asm_use_agg", "Use aggregation aggregates for ASM smoother", "PCGAMGASMSetUseAggs", pc_gamg->use_aggs_in_asm, &pc_gamg->use_aggs_in_asm, NULL));
1727   PetscCall(PetscOptionsBool("-pc_gamg_parallel_coarse_grid_solver", "Use parallel coarse grid solver (otherwise put last grid on one process)", "PCGAMGSetParallelCoarseGridSolve", pc_gamg->use_parallel_coarse_grid_solver, &pc_gamg->use_parallel_coarse_grid_solver, NULL));
1728   PetscCall(PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids", "Pin coarse grids to the CPU", "PCGAMGSetCpuPinCoarseGrids", pc_gamg->cpu_pin_coarse_grids, &pc_gamg->cpu_pin_coarse_grids, NULL));
1729   PetscCall(PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type", "compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth", "PCGAMGSetCoarseGridLayoutType", LayoutTypes,
1730                              (PetscEnum)pc_gamg->layout_type, (PetscEnum *)&pc_gamg->layout_type, NULL));
1731   PetscCall(PetscOptionsInt("-pc_gamg_process_eq_limit", "Limit (goal) on number of equations per process on coarse grids", "PCGAMGSetProcEqLim", pc_gamg->min_eq_proc, &pc_gamg->min_eq_proc, NULL));
1732   PetscCall(PetscOptionsInt("-pc_gamg_coarse_eq_limit", "Limit on number of equations for the coarse grid", "PCGAMGSetCoarseEqLim", pc_gamg->coarse_eq_limit, &pc_gamg->coarse_eq_limit, NULL));
1733   PetscCall(PetscOptionsInt("-pc_gamg_asm_hem_aggs", "Number of HEM matching passed in aggregates for ASM smoother", "PCGAMGASMSetHEM", pc_gamg->asm_hem_aggs, &pc_gamg->asm_hem_aggs, NULL));
1734   PetscCall(PetscOptionsReal("-pc_gamg_threshold_scale", "Scaling of threshold for each level not specified", "PCGAMGSetThresholdScale", pc_gamg->threshold_scale, &pc_gamg->threshold_scale, NULL));
1735   n = PETSC_MG_MAXLEVELS;
1736   PetscCall(PetscOptionsRealArray("-pc_gamg_threshold", "Relative threshold to use for dropping edges in aggregation graph", "PCGAMGSetThreshold", pc_gamg->threshold, &n, &flag));
1737   if (!flag || n < PETSC_MG_MAXLEVELS) {
1738     if (!flag || n < 1) n = 1; /* no values given: keep threshold[0] and scale the remaining levels */
1739     i = n;
1740     do {
1741       pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale;
1742     } while (++i < PETSC_MG_MAXLEVELS);
1743   }
1744   PetscCall(PetscOptionsInt("-pc_mg_levels", "Set number of MG levels (should get from base class)", "PCGAMGSetNlevels", pc_gamg->Nlevels, &pc_gamg->Nlevels, NULL));
1745   PetscCheck(pc_gamg->Nlevels <= PETSC_MG_MAXLEVELS, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_mg_levels (%d) >= PETSC_MG_MAXLEVELS (%d)", (int)pc_gamg->Nlevels, (int)PETSC_MG_MAXLEVELS);
1746   n = PETSC_MG_MAXLEVELS;
1747   PetscCall(PetscOptionsIntArray("-pc_gamg_rank_reduction_factors", "Manual schedule of coarse grid reduction factors that overrides internal heuristics (0 for first reduction puts one process/device)", "PCGAMGSetRankReductionFactors", pc_gamg->level_reduction_factors, &n, &flag));
1748   if (!flag) i = 0;
1749   else i = n;
1750   while (i < PETSC_MG_MAXLEVELS) pc_gamg->level_reduction_factors[i++] = -1; /* avoid writing past the array when all factors were given */
1753   {
1754     PetscReal eminmax[2] = {0., 0.};
1755     n                    = 2;
1756     PetscCall(PetscOptionsRealArray("-pc_gamg_eigenvalues", "extreme eigenvalues for smoothed aggregation", "PCGAMGSetEigenvalues", eminmax, &n, &flag));
1757     if (flag) {
1758       PetscCheck(n == 2, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_gamg_eigenvalues: must specify 2 parameters, min and max eigenvalues");
1759       PetscCall(PCGAMGSetEigenvalues(pc, eminmax[1], eminmax[0]));
1760     }
1761   }
1762   pc_gamg->injection_index_size = MAT_COARSEN_STRENGTH_INDEX_SIZE;
1763   PetscCall(PetscOptionsIntArray("-pc_gamg_injection_index", "Array of indices to use to use injection coarse grid space", "PCGAMGSetInjectionIndex", pc_gamg->injection_index, &pc_gamg->injection_index_size, NULL));
1764   /* set options for subtype */
1765   PetscCall((*pc_gamg->ops->setfromoptions)(pc, PetscOptionsObject));
1766 
1767   PetscCall(PCGetOptionsPrefix(pc, &pcpre));
1768   PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
1769   PetscOptionsHeadEnd();
1770   PetscFunctionReturn(PETSC_SUCCESS);
1771 }
1772 
1773 /*MC
1774   PCGAMG - Geometric algebraic multigrid (AMG) preconditioner
1775 
1776   Options Database Keys:
1777 + -pc_gamg_type <type,default=agg> - one of agg, geo, or classical (only smoothed aggregation, agg, supported)
1778 . -pc_gamg_repartition  <bool,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
1779 . -pc_gamg_asm_use_agg <bool,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
1780 . -pc_gamg_process_eq_limit <limit, default=50> - `PCGAMG` will reduce the number of MPI ranks used directly on the coarse grids so that there are around <limit>
1781                                         equations on each process that has degrees of freedom
1782 . -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
1783 . -pc_gamg_reuse_interpolation <bool,default=true> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations (should always be true)
1784 . -pc_gamg_threshold[] <thresh,default=[-1,...]> - Before aggregating the graph `PCGAMG` will remove small values from the graph on each level (< 0 does no filtering)
1785 - -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified
1786 
1787   Options Database Keys for Aggregation:
1788 + -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smooth aggregation
1789 . -pc_gamg_aggressive_coarsening <n,default=1> - number of aggressive coarsening (MIS-2) levels from finest.
1790 . -pc_gamg_aggressive_square_graph <bool,default=false> - Use square graph (A'A) or MIS-k (k=2) for aggressive coarsening
1791 . -pc_gamg_mis_k_minimum_degree_ordering <bool,default=true> - Use minimum degree ordering in greedy MIS algorithm
1792 . -pc_gamg_asm_hem_aggs <n,default=0> - Number of HEM aggregation steps for ASM smoother
1793 - -pc_gamg_aggressive_mis_k <n,default=2> - Number (k) distance in MIS coarsening (>2 is 'aggressive')
1794 
1795   Options Database Keys for Multigrid:
1796 + -pc_mg_cycle_type <v> - v or w, see `PCMGSetCycleType()`
1797 . -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see `PCMGSetDistinctSmoothUp()`
1798 . -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
1799 - -pc_mg_levels <levels> - Number of levels of multigrid to use. GAMG has a heuristic so pc_mg_levels is not usually used with GAMG
1800 
1801   Level: intermediate
1802 
1803   Notes:
1804   To obtain good performance for `PCGAMG` for vector valued problems you must
1805   call `MatSetBlockSize()` to indicate the number of degrees of freedom per grid point, and
1806   call `MatSetNearNullSpace()` (or `PCSetCoordinates()` if solving the equations of elasticity) to indicate the near null space of the operator.
1807 
1808   The many options for `PCMG` also work directly for `PCGAMG` such as controlling the smoothers on each level etc.
1809 
1810   See [the Users Manual section on PCGAMG](sec_amg) and [the Users Manual section on PCMG](sec_mg) for more details.
1811 
1812 .seealso: [](ch_ksp), `PCCreate()`, `PCSetType()`, `MatSetBlockSize()`, `PCMGType`, `PCSetCoordinates()`, `MatSetNearNullSpace()`, `PCGAMGSetType()`, `PCGAMGAGG`, `PCGAMGGEO`, `PCGAMGCLASSICAL`, `PCGAMGSetProcEqLim()`,
1813           `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRepartition()`, `PCGAMGRegister()`, `PCGAMGSetReuseInterpolation()`, `PCGAMGASMSetUseAggs()`, `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetNlevels()`, `PCGAMGSetThreshold()`, `PCGAMGGetType()`, `PCGAMGSetUseSAEstEig()`
1814 M*/
1815 PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
1816 {
1817   PC_GAMG *pc_gamg;
1818   PC_MG   *mg;
1819 
1820   PetscFunctionBegin;
1821   /* register AMG type */
1822   PetscCall(PCGAMGInitializePackage());
1823 
1824   /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
1825   PetscCall(PCSetType(pc, PCMG));
1826   PetscCall(PetscObjectChangeTypeName((PetscObject)pc, PCGAMG));
1827 
1828   /* create a supporting struct and attach it to pc */
1829   PetscCall(PetscNew(&pc_gamg));
1830   PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));
1831   mg           = (PC_MG *)pc->data;
1832   mg->innerctx = pc_gamg;
1833 
1834   PetscCall(PetscNew(&pc_gamg->ops));
1835 
1836   /* these should be in subctx but repartitioning needs simple arrays */
1837   pc_gamg->data_sz = 0;
1838   pc_gamg->data    = NULL;
1839 
1840   /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
1841   pc->ops->setfromoptions = PCSetFromOptions_GAMG;
1842   pc->ops->setup          = PCSetUp_GAMG;
1843   pc->ops->reset          = PCReset_GAMG;
1844   pc->ops->destroy        = PCDestroy_GAMG;
1845   mg->view                = PCView_GAMG;
1846 
1847   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", PCGAMGSetProcEqLim_GAMG));
1848   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", PCGAMGSetCoarseEqLim_GAMG));
1849   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", PCGAMGSetRepartition_GAMG));
1850   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", PCGAMGSetEigenvalues_GAMG));
1851   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", PCGAMGSetUseSAEstEig_GAMG));
1852   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", PCGAMGSetRecomputeEstEig_GAMG));
1853   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", PCGAMGSetReuseInterpolation_GAMG));
1854   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", PCGAMGASMSetUseAggs_GAMG));
1855   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", PCGAMGSetParallelCoarseGridSolve_GAMG));
1856   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", PCGAMGSetCpuPinCoarseGrids_GAMG));
1857   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", PCGAMGSetCoarseGridLayoutType_GAMG));
1858   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", PCGAMGSetThreshold_GAMG));
1859   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", PCGAMGSetRankReductionFactors_GAMG));
1860   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", PCGAMGSetThresholdScale_GAMG));
1861   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", PCGAMGSetType_GAMG));
1862   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", PCGAMGGetType_GAMG));
1863   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", PCGAMGSetNlevels_GAMG));
1864   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", PCGAMGASMSetHEM_GAMG));
1865   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", PCGAMGSetInjectionIndex_GAMG));
1866   pc_gamg->repart                          = PETSC_FALSE;
1867   pc_gamg->reuse_prol                      = PETSC_TRUE;
1868   pc_gamg->use_aggs_in_asm                 = PETSC_FALSE;
1869   pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
1870   pc_gamg->cpu_pin_coarse_grids            = PETSC_FALSE;
1871   pc_gamg->layout_type                     = PCGAMG_LAYOUT_SPREAD;
1872   pc_gamg->min_eq_proc                     = 50;
1873   pc_gamg->asm_hem_aggs                    = 0;
1874   pc_gamg->coarse_eq_limit                 = 50;
1875   for (int i = 0; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = -1;
1876   pc_gamg->threshold_scale  = 1.;
1877   pc_gamg->Nlevels          = PETSC_MG_MAXLEVELS;
1878   pc_gamg->current_level    = 0; /* don't need to init really */
1879   pc_gamg->use_sa_esteig    = PETSC_TRUE;
1880   pc_gamg->recompute_esteig = PETSC_TRUE;
1881   pc_gamg->emin             = 0;
1882   pc_gamg->emax             = 0;
1883 
1884   pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1885 
1886   /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
1887   PetscCall(PCGAMGSetType(pc, PCGAMGAGG));
1888   PetscFunctionReturn(PETSC_SUCCESS);
1889 }
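
/*
  Illustrative sketch (not part of this file): a typical way to use PCGAMG from application code. `A` is
  an assumed assembled matrix with its block size and near null space already set, and `b`, `x` are
  assumed existing Vecs; the command line can then refine the solver, e.g.
  -pc_gamg_threshold 0.01 -pc_gamg_aggressive_coarsening 2 -mg_levels_ksp_type chebyshev.

    KSP ksp;
    PC  pc;

    PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
    PetscCall(KSPSetOperators(ksp, A, A));
    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PCSetType(pc, PCGAMG));
    PetscCall(KSPSetFromOptions(ksp));
    PetscCall(KSPSolve(ksp, b, x));
*/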
1890 
1891 /*@C
1892   PCGAMGInitializePackage - This function initializes everything in the `PCGAMG` package. It is called
1893   from `PCInitializePackage()`.
1894 
1895   Level: developer
1896 
1897 .seealso: [](ch_ksp), `PetscInitialize()`
1898 @*/
1899 PetscErrorCode PCGAMGInitializePackage(void)
1900 {
1901   PetscInt l;
1902 
1903   PetscFunctionBegin;
1904   if (PCGAMGPackageInitialized) PetscFunctionReturn(PETSC_SUCCESS);
1905   PCGAMGPackageInitialized = PETSC_TRUE;
1906   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGGEO, PCCreateGAMG_GEO));
1907   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGAGG, PCCreateGAMG_AGG));
1908   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGCLASSICAL, PCCreateGAMG_Classical));
1909   PetscCall(PetscRegisterFinalize(PCGAMGFinalizePackage));
1910 
1911   /* general events */
1912   PetscCall(PetscLogEventRegister("PCSetUp_GAMG+", PC_CLASSID, &petsc_gamg_setup_events[GAMG_SETUP]));
1913   PetscCall(PetscLogEventRegister(" PCGAMGCreateG", PC_CLASSID, &petsc_gamg_setup_events[GAMG_GRAPH]));
1914   PetscCall(PetscLogEventRegister(" GAMG Coarsen", PC_CLASSID, &petsc_gamg_setup_events[GAMG_COARSEN]));
1915   PetscCall(PetscLogEventRegister("  GAMG MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[GAMG_MIS]));
1916   PetscCall(PetscLogEventRegister(" PCGAMGProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROL]));
1917   PetscCall(PetscLogEventRegister("  GAMG Prol-col", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLA]));
1918   PetscCall(PetscLogEventRegister("  GAMG Prol-lift", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLB]));
1919   PetscCall(PetscLogEventRegister(" PCGAMGOptProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPT]));
1920   PetscCall(PetscLogEventRegister("  GAMG smooth", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPTSM]));
1921   PetscCall(PetscLogEventRegister(" PCGAMGCreateL", PC_CLASSID, &petsc_gamg_setup_events[GAMG_LEVEL]));
1922   PetscCall(PetscLogEventRegister("  GAMG PtAP", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PTAP]));
1923   PetscCall(PetscLogEventRegister("  GAMG Reduce", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REDUCE]));
1924   PetscCall(PetscLogEventRegister("   GAMG Repart", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REPART]));
1925   /* PetscCall(PetscLogEventRegister("   GAMG Inv-Srt", PC_CLASSID, &petsc_gamg_setup_events[SET13])); */
1926   /* PetscCall(PetscLogEventRegister("   GAMG Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14])); */
1927   /* PetscCall(PetscLogEventRegister("   GAMG Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15])); */
1928   for (l = 0; l < PETSC_MG_MAXLEVELS; l++) {
1929     char ename[32];
1930 
1931     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Squ l%02" PetscInt_FMT, l));
1932     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][0]));
1933     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Gal l%02" PetscInt_FMT, l));
1934     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][1]));
1935     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Opt l%02" PetscInt_FMT, l));
1936     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][2]));
1937   }
1938 #if defined(GAMG_STAGES)
1939   { /* create timer stages */
1940     char str[32];
1941     PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", 0));
1942     PetscCall(PetscLogStageRegister(str, &gamg_stages[0]));
1943   }
1944 #endif
1945   PetscFunctionReturn(PETSC_SUCCESS);
1946 }
1947 
1948 /*@C
1949   PCGAMGFinalizePackage - This function frees everything from the `PCGAMG` package. It is
1950   called from `PetscFinalize()` automatically.
1951 
1952   Level: developer
1953 
1954 .seealso: [](ch_ksp), `PetscFinalize()`
1955 @*/
1956 PetscErrorCode PCGAMGFinalizePackage(void)
1957 {
1958   PetscFunctionBegin;
1959   PCGAMGPackageInitialized = PETSC_FALSE;
1960   PetscCall(PetscFunctionListDestroy(&GAMGList));
1961   PetscFunctionReturn(PETSC_SUCCESS);
1962 }
1963 
1964 /*@C
1965   PCGAMGRegister - Register a `PCGAMG` implementation.
1966 
1967   Input Parameters:
1968 + type   - string that will be used as the name of the `PCGAMG` type.
1969 - create - function for creating the gamg context.
1970 
1971   Level: developer
1972 
1973 .seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
1974 @*/
1975 PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
1976 {
1977   PetscFunctionBegin;
1978   PetscCall(PCGAMGInitializePackage());
1979   PetscCall(PetscFunctionListAdd(&GAMGList, type, create));
1980   PetscFunctionReturn(PETSC_SUCCESS);
1981 }
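
/*
  Illustrative sketch (not part of this file): registering a user-defined GAMG implementation. The name
  "my_gamg" and the creation routine MyGAMGCreate() are hypothetical; the routine would fill in
  pc_gamg->ops the way the built-in types do.

    static PetscErrorCode MyGAMGCreate(PC pc)
    {
      PetscFunctionBegin;
      // set pc_gamg->ops->... for this implementation
      PetscFunctionReturn(PETSC_SUCCESS);
    }

    PetscCall(PCGAMGRegister("my_gamg", MyGAMGCreate));
    PetscCall(PCGAMGSetType(pc, "my_gamg")); // pc is an assumed existing PCGAMG preconditioner
*/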
1982 
1983 /*@C
1984   PCGAMGCreateGraph - Creates a graph that is used by the `PCGAMGType` in the coarsening process
1985 
1986   Input Parameters:
1987 + pc - the `PCGAMG`
1988 - A  - the matrix, for any level
1989 
1990   Output Parameter:
1991 . G - the graph
1992 
1993   Level: advanced
1994 
1995 .seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
1996 @*/
1997 PetscErrorCode PCGAMGCreateGraph(PC pc, Mat A, Mat *G)
1998 {
1999   PC_MG   *mg      = (PC_MG *)pc->data;
2000   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
2001 
2002   PetscFunctionBegin;
2003   PetscCall(pc_gamg->ops->creategraph(pc, A, G));
2004   PetscFunctionReturn(PETSC_SUCCESS);
2005 }
2006