/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <petsc/private/matimpl.h>
#include <../src/ksp/pc/impls/gamg/gamg.h>           /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/bjacobi/bjacobi.h> /* Hack to access same_local_solves */

#if defined PETSC_GAMG_USE_LOG
PetscLogEvent petsc_gamg_setup_events[NUM_SET];
#endif

#if defined PETSC_USE_LOG
PetscLogEvent PC_GAMGGraph_AGG;
PetscLogEvent PC_GAMGGraph_GEO;
PetscLogEvent PC_GAMGCoarsen_AGG;
PetscLogEvent PC_GAMGCoarsen_GEO;
PetscLogEvent PC_GAMGProlongator_AGG;
PetscLogEvent PC_GAMGProlongator_GEO;
PetscLogEvent PC_GAMGOptProlongator_AGG;
#endif

/* #define GAMG_STAGES */
#if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
static PetscLogStage gamg_stages[PETSC_GAMG_MAXLEVELS];
#endif

static PetscFunctionList GAMGList = 0;
static PetscBool PCGAMGPackageInitialized;

/* ----------------------------------------------------------------------------- */
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (pc_gamg->data) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_PLIB,"This should not happen, cleaned up in SetUp\n");
  pc_gamg->data_sz = 0;
  ierr = PetscFree(pc_gamg->orig_data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCGAMGCreateLevel_GAMG: create the coarse grid operator with RAP, then repartition
     and/or reduce the number of active processes.

   Input Parameters:
   . pc - parameters + side effect: coarse data in 'pc_gamg->data' and
          'pc_gamg->data_sz' are changed via repartitioning/reduction
   . Amat_fine - matrix on this fine (k) level
   . cr_bs - coarse block size
   In/Output Parameters:
   . a_P_inout - prolongation operator to the next level (k-->k-1)
   . a_nactive_proc - number of active procs
   Output Parameter:
   . a_Amat_crs - coarse matrix that is created (k-1)
*/

static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc,Mat Amat_fine,PetscInt cr_bs,Mat *a_P_inout,Mat *a_Amat_crs,PetscMPIInt *a_nactive_proc,IS * Pcolumnperm, PetscBool is_last)
{
  PetscErrorCode  ierr;
  PC_MG           *mg         = (PC_MG*)pc->data;
  PC_GAMG         *pc_gamg    = (PC_GAMG*)mg->innerctx;
  Mat             Cmat,Pold=*a_P_inout;
  MPI_Comm        comm;
  PetscMPIInt     rank,size,new_size,nactive=*a_nactive_proc;
  PetscInt        ncrs_eq,ncrs,f_bs;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Amat_fine,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MatGetBlockSize(Amat_fine, &f_bs);CHKERRQ(ierr);
  ierr = MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat);CHKERRQ(ierr);

  /* set 'ncrs' (nodes), 'ncrs_eq' (equations) */
  ierr = MatGetLocalSize(Cmat, &ncrs_eq, NULL);CHKERRQ(ierr);
  if (pc_gamg->data_cell_rows>0) {
    ncrs = pc_gamg->data_sz/pc_gamg->data_cell_cols/pc_gamg->data_cell_rows;
  } else {
    PetscInt  bs;
    ierr = MatGetBlockSize(Cmat, &bs);CHKERRQ(ierr);
    ncrs = ncrs_eq/bs;
  }

  /* get the number of PEs to make active ('new_size') for the reduction; it can be any integer in [1,P] */
  if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) new_size = 1;
  else {
    PetscInt ncrs_eq_glob;
    ierr     = MatGetSize(Cmat, &ncrs_eq_glob, NULL);CHKERRQ(ierr);
    new_size = (PetscMPIInt)((float)ncrs_eq_glob/(float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
    if (!new_size) new_size = 1; /* unlikely, but possible? */
    else if (new_size >= nactive) new_size = nactive; /* no change, rare */
  }

  if (Pcolumnperm) *Pcolumnperm = NULL;

  if (!pc_gamg->repart && new_size==nactive) {
    *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
    /* we know that the grid structure can be reused in MatPtAP */
  } else {
    /* we know that the grid structure can NOT be reused in MatPtAP */
    PetscInt       *counts,*newproc_idx,ii,jj,kk,strideNew,*tidx,ncrs_new,ncrs_eq_new,nloc_old;
    IS             is_eq_newproc,is_eq_num,is_eq_num_prim,new_eq_indices;

    nloc_old = ncrs_eq/cr_bs;
    if (ncrs_eq % cr_bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ncrs_eq %D not divisible by cr_bs %D",ncrs_eq,cr_bs);
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
    /* make 'is_eq_newproc' */
    ierr = PetscMalloc1(size, &counts);CHKERRQ(ierr);
    if (pc_gamg->repart) {
      /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
      Mat adj;

      ierr = PetscInfo3(pc,"Repartition: size (active): %D --> %D, %D local equations\n",*a_nactive_proc,new_size,ncrs_eq);CHKERRQ(ierr);

      /* get 'adj' */
      if (cr_bs == 1) {
        ierr = MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
      } else {
        /* make a scalar matrix to partition (no Stokes here) */
        Mat               tMat;
        PetscInt          Istart_crs,Iend_crs,ncols,jj,Ii;
        const PetscScalar *vals;
        const PetscInt    *idx;
        PetscInt          *d_nnz, *o_nnz, M, N;
        static PetscInt   llev = 0;
        MatType           mtype;

        ierr = PetscMalloc2(ncrs, &d_nnz,ncrs, &o_nnz);CHKERRQ(ierr);
        ierr = MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs);CHKERRQ(ierr);
        ierr = MatGetSize(Cmat, &M, &N);CHKERRQ(ierr);
        for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
          ierr      = MatGetRow(Cmat,Ii,&ncols,0,0);CHKERRQ(ierr);
          d_nnz[jj] = ncols/cr_bs;
          o_nnz[jj] = ncols/cr_bs;
          ierr      = MatRestoreRow(Cmat,Ii,&ncols,0,0);CHKERRQ(ierr);
          if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
          if (o_nnz[jj] > (M/cr_bs-ncrs)) o_nnz[jj] = M/cr_bs-ncrs;
        }

        ierr = MatGetType(Amat_fine,&mtype);CHKERRQ(ierr);
        ierr = MatCreate(comm, &tMat);CHKERRQ(ierr);
        ierr = MatSetSizes(tMat, ncrs, ncrs,PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = MatSetType(tMat,mtype);CHKERRQ(ierr);
        ierr = MatSeqAIJSetPreallocation(tMat,0,d_nnz);CHKERRQ(ierr);
        ierr = MatMPIAIJSetPreallocation(tMat,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
        ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);

        for (ii = Istart_crs; ii < Iend_crs; ii++) {
          PetscInt dest_row = ii/cr_bs;
          ierr = MatGetRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
          for (jj = 0; jj < ncols; jj++) {
            PetscInt    dest_col = idx[jj]/cr_bs;
            PetscScalar v        = 1.0;
            ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES);CHKERRQ(ierr);
          }
          ierr = MatRestoreRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
        }
        ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

        if (llev++ == -1) {
          PetscViewer viewer; char fname[32];
          ierr = PetscSNPrintf(fname,sizeof(fname),"part_mat_%D.mat",llev);CHKERRQ(ierr);
          ierr = PetscViewerBinaryOpen(comm,fname,FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
          ierr = MatView(tMat, viewer);CHKERRQ(ierr);
          ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
        }
        ierr = MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
        ierr = MatDestroy(&tMat);CHKERRQ(ierr);
      } /* create 'adj' */

      { /* partition: get newproc_idx */
        char            prefix[256];
        const char      *pcpre;
        const PetscInt  *is_idx;
        MatPartitioning mpart;
        IS              proc_is;
        PetscInt        targetPE;

        ierr = MatPartitioningCreate(comm, &mpart);CHKERRQ(ierr);
        ierr = MatPartitioningSetAdjacency(mpart, adj);CHKERRQ(ierr);
        ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
        ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
        ierr = PetscObjectSetOptionsPrefix((PetscObject)mpart,prefix);CHKERRQ(ierr);
        ierr = MatPartitioningSetFromOptions(mpart);CHKERRQ(ierr);
        ierr = MatPartitioningSetNParts(mpart, new_size);CHKERRQ(ierr);
        ierr = MatPartitioningApply(mpart, &proc_is);CHKERRQ(ierr);
        ierr = MatPartitioningDestroy(&mpart);CHKERRQ(ierr);

        /* collect IS info */
        ierr     = PetscMalloc1(ncrs_eq, &newproc_idx);CHKERRQ(ierr);
        ierr     = ISGetIndices(proc_is, &is_idx);CHKERRQ(ierr);
        targetPE = 1; /* bring to "front" of machine */
        /*targetPE = size/new_size;*/ /* spread partitioning across machine */
        for (kk = jj = 0 ; kk < nloc_old ; kk++) {
          for (ii = 0 ; ii < cr_bs ; ii++, jj++) {
            newproc_idx[jj] = is_idx[kk] * targetPE; /* distribution */
          }
        }
        ierr = ISRestoreIndices(proc_is, &is_idx);CHKERRQ(ierr);
        ierr = ISDestroy(&proc_is);CHKERRQ(ierr);
      }
      ierr = MatDestroy(&adj);CHKERRQ(ierr);

      ierr = ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc);CHKERRQ(ierr);
      ierr = PetscFree(newproc_idx);CHKERRQ(ierr);
    } else { /* simple aggregation of parts -- 'is_eq_newproc' */
      PetscInt rfactor,targetPE;

      /* find factor */
      if (new_size == 1) rfactor = size; /* easy */
      else {
        PetscReal best_fact = 0.;
        jj = -1;
        for (kk = 1 ; kk <= size ; kk++) {
          if (!(size%kk)) { /* a candidate */
            PetscReal nactpe = (PetscReal)size/(PetscReal)kk, fact = nactpe/(PetscReal)new_size;
            if (fact > 1.0) fact = 1./fact; /* keep fact < 1 */
            if (fact > best_fact) {
              best_fact = fact; jj = kk;
            }
          }
        }
        if (jj != -1) rfactor = jj;
        else rfactor = 1; /* does this happen? e.g., 'size' is prime */
      }
      new_size = size/rfactor;

      if (new_size==nactive) {
        *a_Amat_crs = Cmat; /* output - no repartitioning or reduction, bail out because nested here */
        ierr        = PetscFree(counts);CHKERRQ(ierr);
        ierr = PetscInfo2(pc,"Aggregate processors noop: new_size=%D, neq(loc)=%D\n",new_size,ncrs_eq);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
        ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
        PetscFunctionReturn(0);
      }

      ierr = PetscInfo1(pc,"Number of equations (loc) %D with simple aggregation\n",ncrs_eq);CHKERRQ(ierr);
      targetPE = rank/rfactor;
      ierr     = ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc);CHKERRQ(ierr);
    } /* end simple 'is_eq_newproc' */

    /*
      Create an index set from the is_eq_newproc index set to indicate the mapping TO
     */
    ierr = ISPartitioningToNumbering(is_eq_newproc, &is_eq_num);CHKERRQ(ierr);
    is_eq_num_prim = is_eq_num;
    /*
      Determine how many equations/vertices are assigned to each processor
     */
    ierr        = ISPartitioningCount(is_eq_newproc, size, counts);CHKERRQ(ierr);
    ncrs_eq_new = counts[rank];
    ierr        = ISDestroy(&is_eq_newproc);CHKERRQ(ierr);
    ncrs_new = ncrs_eq_new/cr_bs; /* nodes */

    ierr = PetscFree(counts);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
    /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into one complex abstraction */
    {
    Vec            src_crd, dest_crd;
    const PetscInt *idx,ndata_rows=pc_gamg->data_cell_rows,ndata_cols=pc_gamg->data_cell_cols,node_data_sz=ndata_rows*ndata_cols;
    VecScatter     vecscat;
    PetscScalar    *array;
    IS isscat;

    /* move data (for primal equations only) */
    /* Create a vector to contain the newly ordered element information */
    ierr = VecCreate(comm, &dest_crd);CHKERRQ(ierr);
    ierr = VecSetSizes(dest_crd, node_data_sz*ncrs_new, PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(dest_crd,VECSTANDARD);CHKERRQ(ierr); /* this is needed! */
    /*
     There are 'ndata_rows*ndata_cols' data items per node; one can think of the vectors as having
     a block size of 'node_data_sz'.  Note, ISs are expanded into equation space by 'cr_bs'.
     */
    ierr = PetscMalloc1(ncrs*node_data_sz, &tidx);CHKERRQ(ierr);
    ierr = ISGetIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
    for (ii=0,jj=0; ii<ncrs; ii++) {
      PetscInt id = idx[ii*cr_bs]/cr_bs; /* get node back */
      for (kk=0; kk<node_data_sz; kk++, jj++) tidx[jj] = id*node_data_sz + kk;
    }
    ierr = ISRestoreIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
    ierr = ISCreateGeneral(comm, node_data_sz*ncrs, tidx, PETSC_COPY_VALUES, &isscat);CHKERRQ(ierr);
    ierr = PetscFree(tidx);CHKERRQ(ierr);
    /*
     Create a vector to contain the original vertex information for each element
     */
    ierr = VecCreateSeq(PETSC_COMM_SELF, node_data_sz*ncrs, &src_crd);CHKERRQ(ierr);
    for (jj=0; jj<ndata_cols; jj++) {
      const PetscInt stride0=ncrs*pc_gamg->data_cell_rows;
      for (ii=0; ii<ncrs; ii++) {
        for (kk=0; kk<ndata_rows; kk++) {
          PetscInt    ix = ii*ndata_rows + kk + jj*stride0, jx = ii*node_data_sz + kk*ndata_cols + jj;
          PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
          ierr = VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES);CHKERRQ(ierr);
        }
      }
    }
    ierr = VecAssemblyBegin(src_crd);CHKERRQ(ierr);
    ierr = VecAssemblyEnd(src_crd);CHKERRQ(ierr);
    /*
      Scatter the element vertex information (still in the original vertex ordering)
      to the correct processor
    */
    ierr = VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat);CHKERRQ(ierr);
    ierr = ISDestroy(&isscat);CHKERRQ(ierr);
    ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterDestroy(&vecscat);CHKERRQ(ierr);
    ierr = VecDestroy(&src_crd);CHKERRQ(ierr);
    /*
      Put the element vertex data into a new allocation of the gdata->ele
    */
    ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
    ierr = PetscMalloc1(node_data_sz*ncrs_new, &pc_gamg->data);CHKERRQ(ierr);

    pc_gamg->data_sz = node_data_sz*ncrs_new;
    strideNew        = ncrs_new*ndata_rows;

    ierr = VecGetArray(dest_crd, &array);CHKERRQ(ierr);
    for (jj=0; jj<ndata_cols; jj++) {
      for (ii=0; ii<ncrs_new; ii++) {
        for (kk=0; kk<ndata_rows; kk++) {
          PetscInt ix = ii*ndata_rows + kk + jj*strideNew, jx = ii*node_data_sz + kk*ndata_cols + jj;
          pc_gamg->data[ix] = PetscRealPart(array[jx]);
        }
      }
    }
    ierr = VecRestoreArray(dest_crd, &array);CHKERRQ(ierr);
    ierr = VecDestroy(&dest_crd);CHKERRQ(ierr);
    }
    /* move A and P (columns) with new layout */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
#endif

    /*
      Invert for MatCreateSubMatrix
    */
    ierr = ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices);CHKERRQ(ierr);
    ierr = ISSort(new_eq_indices);CHKERRQ(ierr); /* is this needed? */
    ierr = ISSetBlockSize(new_eq_indices, cr_bs);CHKERRQ(ierr);
    if (is_eq_num != is_eq_num_prim) {
      ierr = ISDestroy(&is_eq_num_prim);CHKERRQ(ierr); /* could be same as 'is_eq_num' */
    }
    if (Pcolumnperm) {
      ierr = PetscObjectReference((PetscObject)new_eq_indices);CHKERRQ(ierr);
      *Pcolumnperm = new_eq_indices;
    }
    ierr = ISDestroy(&is_eq_num);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
#endif
    /* 'a_Amat_crs' output */
    {
      Mat mat;
      ierr        = MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat);CHKERRQ(ierr);
      *a_Amat_crs = mat;
    }
    ierr = MatDestroy(&Cmat);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
#endif
    /* prolongator */
    {
      IS       findices;
      PetscInt Istart,Iend;
      Mat      Pnew;

      ierr = MatGetOwnershipRange(Pold, &Istart, &Iend);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
      ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = ISCreateStride(comm,Iend-Istart,Istart,1,&findices);CHKERRQ(ierr);
      ierr = ISSetBlockSize(findices,f_bs);CHKERRQ(ierr);
      ierr = MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew);CHKERRQ(ierr);
      ierr = ISDestroy(&findices);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
      ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = MatDestroy(a_P_inout);CHKERRQ(ierr);

      /* output - repartitioned */
      *a_P_inout = Pnew;
    }
    ierr = ISDestroy(&new_eq_indices);CHKERRQ(ierr);

    *a_nactive_proc = new_size; /* output */
  }
  PetscFunctionReturn(0);
}

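/*
  Illustrative sketch (not part of the library): the Galerkin coarse-grid product
  that PCGAMGCreateLevel_GAMG() starts from is A_coarse = P^T A_fine P, computed in
  one MatPtAP() call, as above.  This shows that step in isolation; the function and
  matrix names are hypothetical and error handling is abbreviated.
*/
#if 0
static PetscErrorCode ExampleGalerkinRAP(Mat Afine, Mat P, Mat *Acoarse)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* A_coarse = P^T * A_fine * P; 2.0 is the fill estimate used in the routine above */
  ierr = MatPtAP(Afine, P, MAT_INITIAL_MATRIX, 2.0, Acoarse);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif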
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

*/
PetscErrorCode PCSetUp_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat            Pmat     = pc->pmat;
  PetscInt       fine_level,level,level1,bs,M,N,qq,lidx,nASMBlocksArr[PETSC_GAMG_MAXLEVELS];
  MPI_Comm       comm;
  PetscMPIInt    rank,size,nactivepe;
  Mat            Aarr[PETSC_GAMG_MAXLEVELS],Parr[PETSC_GAMG_MAXLEVELS];
  IS             *ASMLocalIDsArr[PETSC_GAMG_MAXLEVELS];
  PetscLogDouble nnz0=0.,nnztot=0.;
  MatInfo        info;
  PetscBool      is_last = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  if (pc_gamg->setup_count++ > 0) {
    if ((PetscBool)(!pc_gamg->reuse_prol)) {
      /* reset everything */
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
      pc->setupcalled = 0;
    } else {
      PC_MG_Levels **mglevels = mg->levels;
      /* just do Galerkin grids */
      Mat          B,dA,dB;

      if (!pc->setupcalled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCSetUp() has not been called yet");
      if (pc_gamg->Nlevels > 1) {
        /* currently only handle case where mat and pmat are the same on coarser levels */
        ierr = KSPGetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,&dA,&dB);CHKERRQ(ierr);
        /* (re)set to get dirty flag */
        ierr = KSPSetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,dA,dB);CHKERRQ(ierr);

        for (level=pc_gamg->Nlevels-2; level>=0; level--) {
          /* 2nd solve: the matrix structure can change from repartitioning or process reduction,
             but we do not know here whether process reduction happened. Should fix. */
          if (pc_gamg->setup_count==2 /* && pc_gamg->repart||reduction */) {
            ierr = PetscInfo2(pc,"new RAP after first solve level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
            ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_INITIAL_MATRIX,2.0,&B);CHKERRQ(ierr);
            ierr = MatDestroy(&mglevels[level]->A);CHKERRQ(ierr);
            mglevels[level]->A = B;
          } else {
            ierr = PetscInfo2(pc,"RAP after first solve reusing matrix level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
            ierr = KSPGetOperators(mglevels[level]->smoothd,NULL,&B);CHKERRQ(ierr);
            ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_REUSE_MATRIX,1.0,&B);CHKERRQ(ierr);
          }
          ierr = KSPSetOperators(mglevels[level]->smoothd,B,B);CHKERRQ(ierr);
          dB   = B;
        }
      }

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
  }

  if (!pc_gamg->data) {
    if (pc_gamg->orig_data) {
      ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Pmat, &qq, NULL);CHKERRQ(ierr);

      pc_gamg->data_sz        = (qq/bs)*pc_gamg->orig_data_cell_rows*pc_gamg->orig_data_cell_cols;
      pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
      pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;

      ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data);CHKERRQ(ierr);
      for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
    } else {
      if (!pc_gamg->ops->createdefaultdata) SETERRQ(comm,PETSC_ERR_PLIB,"'createdefaultdata' not set(?) need to support NULL data");
      ierr = pc_gamg->ops->createdefaultdata(pc,Pmat);CHKERRQ(ierr);
    }
  }

  /* cache original data for reuse */
  if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
    ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data);CHKERRQ(ierr);
    for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
    pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
    pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
  }

  /* get basic dims */
  ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
  ierr = MatGetSize(Pmat, &M, &N);CHKERRQ(ierr);

  ierr = MatGetInfo(Pmat,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr); /* global reduction */
  nnz0   = info.nz_used;
  nnztot = info.nz_used;
  ierr = PetscInfo6(pc,"level %d) N=%D, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",0,M,pc_gamg->data_cell_rows,pc_gamg->data_cell_cols,(int)(nnz0/(PetscReal)M+0.5),size);CHKERRQ(ierr);

  /* Get A_i and R_i */
  for (level=0, Aarr[0]=Pmat, nactivepe = size; level < (pc_gamg->Nlevels-1) && (!level || M>pc_gamg->coarse_eq_limit); level++) {
    pc_gamg->current_level = level;
    level1 = level + 1;
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#if (defined GAMG_STAGES)
    ierr = PetscLogStagePush(gamg_stages[level]);CHKERRQ(ierr);
#endif
#endif
    { /* construct prolongator */
      Mat              Gmat;
      PetscCoarsenData *agg_lists;
      Mat              Prol11;

      ierr = pc_gamg->ops->graph(pc,Aarr[level], &Gmat);CHKERRQ(ierr);
      ierr = pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists);CHKERRQ(ierr);
      ierr = pc_gamg->ops->prolongator(pc,Aarr[level],Gmat,agg_lists,&Prol11);CHKERRQ(ierr);

      /* could have failed to create new level */
      if (Prol11) {
        /* get new block size of coarse matrices */
        ierr = MatGetBlockSizes(Prol11, NULL, &bs);CHKERRQ(ierr);

        if (pc_gamg->ops->optprolongator) {
          /* smooth */
          ierr = pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11);CHKERRQ(ierr);
        }

        Parr[level1] = Prol11;
      } else Parr[level1] = NULL; /* failed to coarsen */

      if (pc_gamg->use_aggs_in_asm) {
        PetscInt bs;
        ierr = MatGetBlockSizes(Prol11, &bs, NULL);CHKERRQ(ierr);
        ierr = PetscCDGetASMBlocks(agg_lists, bs, Gmat, &nASMBlocksArr[level], &ASMLocalIDsArr[level]);CHKERRQ(ierr);
      }

      ierr = MatDestroy(&Gmat);CHKERRQ(ierr);
      ierr = PetscCDDestroy(agg_lists);CHKERRQ(ierr);
    } /* construct prolongator scope */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#endif
    if (!level) Aarr[0] = Pmat; /* use Pmat for finest level setup */
    if (!Parr[level1]) { /* failed to coarsen */
      ierr = PetscInfo1(pc,"Stop gridding, level %D\n",level);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES
      ierr = PetscLogStagePop();CHKERRQ(ierr);
#endif
      break;
    }
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
    ierr = MatGetSize(Parr[level1], &M, &N);CHKERRQ(ierr); /* N is the next M, a loop test variable */
    if (is_last) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_last is already set");
    if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
    if (level1 == pc_gamg->Nlevels-1) is_last = PETSC_TRUE;
    ierr = pc_gamg->ops->createlevel(pc, Aarr[level], bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
    ierr = MatGetSize(Aarr[level1], &M, &N);CHKERRQ(ierr); /* M is a loop test variable */
    ierr = MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info);CHKERRQ(ierr);
    nnztot += info.nz_used;
    ierr = PetscInfo5(pc,"%d) N=%D, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",level1,M,pc_gamg->data_cell_cols,(int)(info.nz_used/(PetscReal)M),nactivepe);CHKERRQ(ierr);

#if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
    ierr = PetscLogStagePop();CHKERRQ(ierr);
#endif
    /* stop if one node or one proc -- could pull back for singular problems */
    if ( (pc_gamg->data_cell_cols && M/pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M/bs < 2) ) {
      ierr = PetscInfo2(pc,"HARD stop of coarsening on level %D.  Grid too small: %D block nodes\n",level,M/bs);CHKERRQ(ierr);
      level++;
      break;
    }
  } /* levels */
  ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);

  ierr = PetscInfo2(pc,"%D levels, grid complexity = %g\n",level+1,nnztot/nnz0);CHKERRQ(ierr);
  pc_gamg->Nlevels = level + 1;
  fine_level       = level;
  ierr             = PCMGSetLevels(pc,pc_gamg->Nlevels,NULL);CHKERRQ(ierr);

  if (pc_gamg->Nlevels > 1) { /* don't set up MG if only one level */
    /* set default smoothers & set operators */
    for (lidx = 1, level = pc_gamg->Nlevels-2; lidx <= fine_level; lidx++, level--) {
      KSP smoother;
      PC  subpc;

      ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);

      ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
      /* set ops */
      ierr = KSPSetOperators(smoother, Aarr[level], Aarr[level]);CHKERRQ(ierr);
      ierr = PCMGSetInterpolation(pc, lidx, Parr[level+1]);CHKERRQ(ierr);

      /* set defaults */
      ierr = KSPSetType(smoother, KSPCHEBYSHEV);CHKERRQ(ierr);

      /* set blocks for ASM smoother that uses the 'aggregates' */
      if (pc_gamg->use_aggs_in_asm) {
        PetscInt sz;
        IS       *iss;

        sz  = nASMBlocksArr[level];
        iss = ASMLocalIDsArr[level];
        ierr = PCSetType(subpc, PCASM);CHKERRQ(ierr);
        ierr = PCASMSetOverlap(subpc, 0);CHKERRQ(ierr);
        ierr = PCASMSetType(subpc,PC_ASM_BASIC);CHKERRQ(ierr);
        if (!sz) {
          IS       is;
          ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
          ierr = PCASMSetLocalSubdomains(subpc, 1, NULL, &is);CHKERRQ(ierr);
          ierr = ISDestroy(&is);CHKERRQ(ierr);
        } else {
          PetscInt kk;
          ierr = PCASMSetLocalSubdomains(subpc, sz, NULL, iss);CHKERRQ(ierr);
          for (kk=0; kk<sz; kk++) {
            ierr = ISDestroy(&iss[kk]);CHKERRQ(ierr);
          }
          ierr = PetscFree(iss);CHKERRQ(ierr);
        }
        ASMLocalIDsArr[level] = NULL;
        nASMBlocksArr[level]  = 0;
      } else {
        ierr = PCSetType(subpc, PCSOR);CHKERRQ(ierr);
      }
    }
    {
      /* coarse grid */
      KSP smoother,*k2; PC subpc,pc2; PetscInt ii,first;
      Mat Lmat = Aarr[(level=pc_gamg->Nlevels-1)]; lidx = 0;
      ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
      ierr = KSPSetOperators(smoother, Lmat, Lmat);CHKERRQ(ierr);
      if (!pc_gamg->use_parallel_coarse_grid_solver) {
        ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
        ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
        ierr = PCSetType(subpc, PCBJACOBI);CHKERRQ(ierr);
        ierr = PCSetUp(subpc);CHKERRQ(ierr);
        ierr = PCBJacobiGetSubKSP(subpc,&ii,&first,&k2);CHKERRQ(ierr);
        if (ii != 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ii %D is not one",ii);
        ierr = KSPGetPC(k2[0],&pc2);CHKERRQ(ierr);
        ierr = PCSetType(pc2, PCLU);CHKERRQ(ierr);
        ierr = PCFactorSetShiftType(pc2,MAT_SHIFT_INBLOCKS);CHKERRQ(ierr);
        ierr = KSPSetTolerances(k2[0],PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,1);CHKERRQ(ierr);
        ierr = KSPSetType(k2[0], KSPPREONLY);CHKERRQ(ierr);
        /* This flag gets reset by PCBJacobiGetSubKSP(), but our BJacobi really does the same algorithm everywhere (and in
         * fact, all but one process will have zero dofs), so we reset the flag to avoid having PCView_BJacobi attempt to
         * view every subdomain as though they were different. */
        ((PC_BJacobi*)subpc->data)->same_local_solves = PETSC_TRUE;
      }
    }

    /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
    ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
    ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
    ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);

    /* clean up */
    for (level=1; level<pc_gamg->Nlevels; level++) {
      ierr = MatDestroy(&Parr[level]);CHKERRQ(ierr);
      ierr = MatDestroy(&Aarr[level]);CHKERRQ(ierr);
    }
    ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  } else {
    KSP smoother;
    ierr = PetscInfo(pc,"One level solver used (system is seen as DD). Using default solver.\n");CHKERRQ(ierr);
    ierr = PCMGGetSmoother(pc, 0, &smoother);CHKERRQ(ierr);
    ierr = KSPSetOperators(smoother, Aarr[0], Aarr[0]);CHKERRQ(ierr);
    ierr = KSPSetType(smoother, KSPPREONLY);CHKERRQ(ierr);
    ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

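/*
  Illustrative sketch (not part of the library): a minimal user-level setup of a
  KSP solver with PCGAMG, assuming a previously assembled Mat A and Vecs b,x.
  The function name is hypothetical; it only demonstrates the public API exercised
  by PCSetUp_GAMG() above.
*/
#if 0
static PetscErrorCode ExampleSolveWithGAMG(Mat A, Vec b, Vec x)
{
  PetscErrorCode ierr;
  KSP            ksp;
  PC             pc;

  PetscFunctionBegin;
  ierr = KSPCreate(PetscObjectComm((PetscObject)A),&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCGAMG);CHKERRQ(ierr);   /* triggers PCCreate_GAMG() */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr); /* picks up the -pc_gamg_* options */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);      /* PCSetUp_GAMG() runs here */
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif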
/* ------------------------------------------------------------------------- */
/*
 PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg     = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg= (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  if (pc_gamg->ops->destroy) {
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_gamg->ops);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetProcEqLim - Set number of equations to aim for per process on the coarse grids via processor reduction.

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - the number of equations

   Options Database Key:
.  -pc_gamg_process_eq_limit <limit>

   Notes:
    GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
    that has degrees of freedom

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: PCGAMGSetCoarseEqLim()
@*/
PetscErrorCode  PCGAMGSetProcEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetProcEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (n>0) pc_gamg->min_eq_proc = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetCoarseEqLim - Set maximum number of equations on the coarsest grid.

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - maximum number of equations to aim for

   Options Database Key:
.  -pc_gamg_coarse_eq_limit <limit>

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: PCGAMGSetProcEqLim()
@*/
PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetCoarseEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (n>0) pc_gamg->coarse_eq_limit = n;
  PetscFunctionReturn(0);
}
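
/*
  Illustrative sketch (not part of the library): programmatic equivalents of
  -pc_gamg_process_eq_limit 200 -pc_gamg_coarse_eq_limit 1000, assuming 'pc' is
  already of type PCGAMG.  The values are arbitrary examples, not recommendations.
*/
#if 0
static PetscErrorCode ExampleSetEqLimits(PC pc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCGAMGSetProcEqLim(pc,200);CHKERRQ(ierr);    /* aim for ~200 equations per active process */
  ierr = PCGAMGSetCoarseEqLim(pc,1000);CHKERRQ(ierr); /* stop coarsening near 1000 global equations */
  PetscFunctionReturn(0);
}
#endif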

/*@
   PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - PETSC_TRUE or PETSC_FALSE

   Options Database Key:
.  -pc_gamg_repartition <true,false>

   Notes:
    this will generally improve the load balancing of the work on each level

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetRepartition_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->repart = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding an algebraic multigrid preconditioner

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - PETSC_TRUE or PETSC_FALSE

   Options Database Key:
.  -pc_gamg_reuse_interpolation <true,false>

   Level: intermediate

   Notes:
    this may negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
    rebuilding the preconditioner more quickly.

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetReuseInterpolation_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->reuse_prol = n;
  PetscFunctionReturn(0);
}
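
/*
  Illustrative sketch (not part of the library): reusing the prolongation across
  repeated solves whose matrix values change but whose structure does not (e.g.
  successive Newton steps).  Assumes 'ksp' wraps a PCGAMG; names are hypothetical.
*/
#if 0
static PetscErrorCode ExampleReuseInterpolation(KSP ksp, Mat A, Vec b, Vec x)
{
  PetscErrorCode ierr;
  PC             pc;

  PetscFunctionBegin;
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCGAMGSetReuseInterpolation(pc,PETSC_TRUE);CHKERRQ(ierr);
  /* after the matrix values are updated, only the Galerkin products are redone */
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif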

/*@
   PCGAMGASMSetUseAggs - Have the PCGAMG smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner.

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - PETSC_TRUE to use aggregates, PETSC_FALSE to not

   Options Database Key:
.  -pc_gamg_asm_use_agg

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGASMSetUseAggs_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_aggs_in_asm = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetUseParallelCoarseGridSolve - allow a parallel coarse grid solver

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - PETSC_TRUE to not force coarse grid onto one processor

   Options Database Key:
.  -pc_gamg_use_parallel_coarse_grid_solver

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetUseParallelCoarseGridSolve_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_parallel_coarse_grid_solver = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetNlevels - Sets the maximum number of levels PCGAMG will use

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner
-  n - the maximum number of levels to use

   Options Database Key:
.  -pc_mg_levels

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetNlevels_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->Nlevels = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetThreshold - Relative threshold to use for dropping edges in the aggregation graph

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner context
.  v - array of threshold values for the levels; 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
-  n - number of threshold values provided in v

   Options Database Key:
.  -pc_gamg_threshold <threshold>

   Notes:
    Increasing the threshold decreases the rate of coarsening. Conversely, reducing the threshold increases the rate of coarsening (aggressive coarsening), which reduces the complexity of the coarse grids but generally results in slower solver convergence rates. Reducing coarse grid complexity also reduces the cost of the Galerkin coarse grid construction considerably.
    Before coarsening or aggregating the graph, GAMG removes small values from the graph with this threshold, thus reducing the coupling in the graph and giving a different (perhaps better) coarse set of points.

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: PCGAMGFilterGraph(), PCGAMGSetSquareGraph()
@*/
PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetThreshold_C",(PC,PetscReal[],PetscInt),(pc,v,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscInt i;

  PetscFunctionBegin;
  for (i=0;i<n;i++) pc_gamg->threshold[i] = v[i];
  /* fill the remaining levels by scaling the last given value; a for loop avoids the
     out-of-bounds write the old do-while performed when n == PETSC_GAMG_MAXLEVELS */
  for (;i<PETSC_GAMG_MAXLEVELS;i++) pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;
  PetscFunctionReturn(0);
}
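
/*
  Illustrative sketch (not part of the library): setting per-level thresholds
  programmatically, equivalent to -pc_gamg_threshold 0.05,0.01.  Levels beyond the
  array are filled using the threshold scale, as in PCGAMGSetThreshold_GAMG() above.
  The function name and values are hypothetical.
*/
#if 0
static PetscErrorCode ExampleSetThresholds(PC pc)
{
  PetscErrorCode ierr;
  PetscReal      th[2] = {0.05, 0.01}; /* finest level, then the next level */

  PetscFunctionBegin;
  ierr = PCGAMGSetThreshold(pc,th,2);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif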

/*@
   PCGAMGSetThresholdScale - Relative threshold reduction at each level

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  scale - the threshold value reduction, usually < 1.0

   Options Database Key:
.  -pc_gamg_threshold_scale <v>

   Level: advanced

   Concepts: Unstructured multigrid preconditioner

.seealso: ()
@*/
PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetThresholdScale_C",(PC,PetscReal),(pc,v));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->threshold_scale = v;
  PetscFunctionReturn(0);
}

/*@C
   PCGAMGSetType - Set solution method

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  type - PCGAMGAGG, PCGAMGGEO, or PCGAMGCLASSICAL

   Options Database Key:
.  -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: PCGAMGGetType(), PCGAMG, PCGAMGType
@*/
PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetType_C",(PC,PCGAMGType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   PCGAMGGetType - Get solution method

   Collective on PC

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  type - the type of algorithm used

   Level: intermediate

   Concepts: Unstructured multigrid preconditioner

.seealso: PCGAMGSetType(), PCGAMGType
@*/
PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscUseMethod(pc,"PCGAMGGetType_C",(PC,PCGAMGType*),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
{
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  *type = pc_gamg->type;
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr,(*r)(PC);
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->type = type;
  ierr = PetscFunctionListFind(GAMGList,type,&r);CHKERRQ(ierr);
  if (!r) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown GAMG type %s given",type);
  if (pc_gamg->ops->destroy) {
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
    ierr = PetscMemzero(pc_gamg->ops,sizeof(struct _PCGAMGOps));CHKERRQ(ierr);
    pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
    /* cleaning up common data in pc_gamg - this should disappear someday */
    pc_gamg->data_cell_cols = 0;
    pc_gamg->data_cell_rows = 0;
    pc_gamg->orig_data_cell_cols = 0;
    pc_gamg->orig_data_cell_rows = 0;
    ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
    pc_gamg->data_sz = 0;
  }
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = PetscStrallocpy(type,&pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = (*r)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
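
/*
  Illustrative sketch (not part of the library): selecting the GAMG variant in code,
  equivalent to -pc_gamg_type classical.  Assumes 'pc' is already of type PCGAMG;
  the function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleSelectGAMGType(PC pc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCGAMGSetType(pc,PCGAMGCLASSICAL);CHKERRQ(ierr); /* or PCGAMGAGG (the default), PCGAMGGEO */
  PetscFunctionReturn(0);
}
#endif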

static PetscErrorCode PCView_GAMG(PC pc,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscInt       i;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscViewerASCIIPrintf(viewer,"    GAMG specific options\n");CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer,"      Threshold for dropping small values in graph on each level =");CHKERRQ(ierr);
  for (i=0;i<pc_gamg->current_level;i++) {
    ierr = PetscViewerASCIIPrintf(viewer," %g",(double)pc_gamg->threshold[i]);CHKERRQ(ierr);
  }
  ierr = PetscViewerASCIIPrintf(viewer,"\n");CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer,"      Threshold scaling factor for each level not specified = %g\n",(double)pc_gamg->threshold_scale);CHKERRQ(ierr);
  if (pc_gamg->use_aggs_in_asm) {
    ierr = PetscViewerASCIIPrintf(viewer,"      Using aggregates from coarsening process to define subdomains for PCASM\n");CHKERRQ(ierr);
  }
  if (pc_gamg->use_parallel_coarse_grid_solver) {
    ierr = PetscViewerASCIIPrintf(viewer,"      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n");CHKERRQ(ierr);
  }
  if (pc_gamg->ops->view) {
    ierr = (*pc_gamg->ops->view)(pc,viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode PCSetFromOptions_GAMG(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscBool      flag;
  MPI_Comm       comm;
  char           prefix[256];
  PetscInt       i,n;
  const char     *pcpre;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = PetscOptionsHead(PetscOptionsObject,"GAMG options");CHKERRQ(ierr);
  {
    char tname[256];
    ierr = PetscOptionsFList("-pc_gamg_type","Type of AMG method","PCGAMGSetType",GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag);CHKERRQ(ierr);
    if (flag) {
      ierr = PCGAMGSetType(pc,tname);CHKERRQ(ierr);
    }
    ierr = PetscOptionsBool("-pc_gamg_repartition","Repartition coarse grids","PCGAMGSetRepartition",pc_gamg->repart,&pc_gamg->repart,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsBool("-pc_gamg_reuse_interpolation","Reuse prolongation operator","PCGAMGSetReuseInterpolation",pc_gamg->reuse_prol,&pc_gamg->reuse_prol,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsBool("-pc_gamg_asm_use_agg","Use aggregation aggregates for ASM smoother","PCGAMGASMSetUseAggs",pc_gamg->use_aggs_in_asm,&pc_gamg->use_aggs_in_asm,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsBool("-pc_gamg_use_parallel_coarse_grid_solver","Use parallel coarse grid solver (otherwise put last grid on one process)","PCGAMGSetUseParallelCoarseGridSolve",pc_gamg->use_parallel_coarse_grid_solver,&pc_gamg->use_parallel_coarse_grid_solver,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_gamg_process_eq_limit","Limit (goal) on number of equations per process on coarse grids","PCGAMGSetProcEqLim",pc_gamg->min_eq_proc,&pc_gamg->min_eq_proc,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_gamg_coarse_eq_limit","Limit on number of equations for the coarse grid","PCGAMGSetCoarseEqLim",pc_gamg->coarse_eq_limit,&pc_gamg->coarse_eq_limit,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsReal("-pc_gamg_threshold_scale","Scaling of threshold for each level not specified","PCGAMGSetThresholdScale",pc_gamg->threshold_scale,&pc_gamg->threshold_scale,NULL);CHKERRQ(ierr);
    n = PETSC_GAMG_MAXLEVELS;
    ierr = PetscOptionsRealArray("-pc_gamg_threshold","Relative threshold to use for dropping edges in aggregation graph","PCGAMGSetThreshold",pc_gamg->threshold,&n,&flag);CHKERRQ(ierr);
    if (!flag || n < PETSC_GAMG_MAXLEVELS) {
      if (!flag) n = 1;
      i = n;
      do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_GAMG_MAXLEVELS);
    }
    ierr = PetscOptionsInt("-pc_mg_levels","Set number of MG levels","PCGAMGSetNlevels",pc_gamg->Nlevels,&pc_gamg->Nlevels,NULL);CHKERRQ(ierr);

    /* set options for subtype */
    if (pc_gamg->ops->setfromoptions) {ierr = (*pc_gamg->ops->setfromoptions)(PetscOptionsObject,pc);CHKERRQ(ierr);}
  }
  ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
  ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
     PCGAMG - Geometric algebraic multigrid (AMG) preconditioner

   Options Database Keys:
+   -pc_gamg_type <type> - one of agg, geo, or classical
.   -pc_gamg_repartition  <true,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
.   -pc_gamg_reuse_interpolation <true,default=false> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations
.   -pc_gamg_asm_use_agg <true,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
.   -pc_gamg_process_eq_limit <limit, default=50> - GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit>
                                        equations on each process that has degrees of freedom
.   -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
.   -pc_gamg_threshold[] <thresh,default=0> - Before aggregating the graph GAMG will remove small values from the graph on each level
-   -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified

   Options Database Keys for default Aggregation:
+  -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smoothed aggregation
.  -pc_gamg_sym_graph <true,default=false> - symmetrize the graph before computing the aggregation
-  -pc_gamg_square_graph <n,default=1> - number of levels to square the graph before aggregating it

   Multigrid options:
+  -pc_mg_cycles <v> - v or w, see PCMGSetCycleType()
.  -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see PCMGSetDistinctSmoothUp()
.  -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
-  -pc_mg_levels <levels> - Number of levels of multigrid to use.

  Notes:
    In order to obtain good performance for PCGAMG for vector valued problems you must
       call MatSetBlockSize() to indicate the number of degrees of freedom per grid point, and
       call MatSetNearNullSpace() (or PCSetCoordinates() if solving the equations of elasticity) to indicate the near null space of the operator.
       See the Users Manual Chapter 4 for more details.

  Level: intermediate

  Concepts: algebraic multigrid

.seealso:  PCCreate(), PCSetType(), MatSetBlockSize(), PCMGType, PCSetCoordinates(), MatSetNearNullSpace(), PCGAMGSetType(), PCGAMGAGG, PCGAMGGEO, PCGAMGCLASSICAL, PCGAMGSetProcEqLim(),
           PCGAMGSetCoarseEqLim(), PCGAMGSetRepartition(), PCGAMGRegister(), PCGAMGSetReuseInterpolation(), PCGAMGASMSetUseAggs(), PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetNlevels(), PCGAMGSetThreshold(), PCGAMGGetType()
M*/

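/*
  Illustrative sketch (not part of the library): the setup steps the Notes above call
  for on a vector-valued problem such as 3D elasticity, assuming a Mat A and an array
  'coords' of 3*nloc nodal coordinates.  The function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleElasticitySetup(PC pc, Mat A, PetscReal *coords, PetscInt nloc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetBlockSize(A,3);CHKERRQ(ierr);               /* 3 dofs per grid point */
  ierr = PCSetCoordinates(pc,3,nloc,coords);CHKERRQ(ierr); /* builds the rigid-body near null space */
  PetscFunctionReturn(0);
}
#endif
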
1277 PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
1278 {
1279   PetscErrorCode ierr,i;
1280   PC_GAMG        *pc_gamg;
1281   PC_MG          *mg;
1282 
1283   PetscFunctionBegin;
1284    /* register AMG type */
1285   ierr = PCGAMGInitializePackage();CHKERRQ(ierr);
1286 
1287   /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
1288   ierr = PCSetType(pc, PCMG);CHKERRQ(ierr);
1289   ierr = PetscObjectChangeTypeName((PetscObject)pc, PCGAMG);CHKERRQ(ierr);
1290 
1291   /* create a supporting struct and attach it to pc */
1292   ierr         = PetscNewLog(pc,&pc_gamg);CHKERRQ(ierr);
1293   ierr         = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1294   mg           = (PC_MG*)pc->data;
1295   mg->innerctx = pc_gamg;
1296 
1297   ierr = PetscNewLog(pc,&pc_gamg->ops);CHKERRQ(ierr);
1298 
1299   pc_gamg->setup_count = 0;
1300   /* these should be in subctx but repartitioning needs simple arrays */
1301   pc_gamg->data_sz = 0;
1302   pc_gamg->data    = 0;
1303 
1304   /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
1305   pc->ops->setfromoptions = PCSetFromOptions_GAMG;
1306   pc->ops->setup          = PCSetUp_GAMG;
1307   pc->ops->reset          = PCReset_GAMG;
1308   pc->ops->destroy        = PCDestroy_GAMG;
1309   mg->view                = PCView_GAMG;
1310 
1311   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetProcEqLim_C",PCGAMGSetProcEqLim_GAMG);CHKERRQ(ierr);
1312   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseEqLim_C",PCGAMGSetCoarseEqLim_GAMG);CHKERRQ(ierr);
1313   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetRepartition_C",PCGAMGSetRepartition_GAMG);CHKERRQ(ierr);
1314   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetReuseInterpolation_C",PCGAMGSetReuseInterpolation_GAMG);CHKERRQ(ierr);
1315   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGASMSetUseAggs_C",PCGAMGASMSetUseAggs_GAMG);CHKERRQ(ierr);
1316   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseParallelCoarseGridSolve_C",PCGAMGSetUseParallelCoarseGridSolve_GAMG);CHKERRQ(ierr);
1317   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThreshold_C",PCGAMGSetThreshold_GAMG);CHKERRQ(ierr);
1318   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThresholdScale_C",PCGAMGSetThresholdScale_GAMG);CHKERRQ(ierr);
1319   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetType_C",PCGAMGSetType_GAMG);CHKERRQ(ierr);
1320   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGGetType_C",PCGAMGGetType_GAMG);CHKERRQ(ierr);
1321   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetNlevels_C",PCGAMGSetNlevels_GAMG);CHKERRQ(ierr);
1322   pc_gamg->repart                          = PETSC_FALSE;
1323   pc_gamg->reuse_prol                      = PETSC_FALSE;
1324   pc_gamg->use_aggs_in_asm                 = PETSC_FALSE;
1325   pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
1326   pc_gamg->min_eq_proc                     = 50;
1327   pc_gamg->coarse_eq_limit                 = 50;
1328   for (i=0;i<PETSC_GAMG_MAXLEVELS;i++) pc_gamg->threshold[i] = 0.;
1329   pc_gamg->threshold_scale                 = 1.;
1330   pc_gamg->Nlevels                         = PETSC_GAMG_MAXLEVELS;
1331   pc_gamg->current_level                   = 0; /* not strictly needed; reset during setup */
1332   pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1333 
1334   /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
1335   ierr = PCGAMGSetType(pc,PCGAMGAGG);CHKERRQ(ierr);
1336   PetscFunctionReturn(0);
1337 }
1338 
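/* A minimal usage sketch for this preconditioner (assumes an assembled Mat A and Vecs b,x;
   error checking omitted for brevity):

     KSP ksp;
     PC  pc;
     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCGAMG);      <-- invokes PCCreate_GAMG() through the type registry
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);
*/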
1339 /*@C
1340  PCGAMGInitializePackage - This function initializes everything in the PCGAMG package. It is called
1341     from PetscDLLibraryRegister() when using dynamic libraries, and on the first call to PCCreate_GAMG()
1342     when using static libraries.
1343 
1344  Level: developer
1345 
1346  .keywords: PC, PCGAMG, initialize, package
1347  .seealso: PetscInitialize()
1348 @*/
1349 PetscErrorCode PCGAMGInitializePackage(void)
1350 {
1351   PetscErrorCode ierr;
1352 
1353   PetscFunctionBegin;
1354   if (PCGAMGPackageInitialized) PetscFunctionReturn(0);
1355   PCGAMGPackageInitialized = PETSC_TRUE;
1356   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGGEO,PCCreateGAMG_GEO);CHKERRQ(ierr);
1357   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGAGG,PCCreateGAMG_AGG);CHKERRQ(ierr);
1358   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGCLASSICAL,PCCreateGAMG_Classical);CHKERRQ(ierr);
1359   ierr = PetscRegisterFinalize(PCGAMGFinalizePackage);CHKERRQ(ierr);
1360 
1361   /* general events */
1362   ierr = PetscLogEventRegister("PCGAMGGraph_AGG", PC_CLASSID, &PC_GAMGGraph_AGG);CHKERRQ(ierr);
1363   ierr = PetscLogEventRegister("PCGAMGGraph_GEO", PC_CLASSID, &PC_GAMGGraph_GEO);CHKERRQ(ierr);
1364   ierr = PetscLogEventRegister("PCGAMGCoarse_AGG", PC_CLASSID, &PC_GAMGCoarsen_AGG);CHKERRQ(ierr);
1365   ierr = PetscLogEventRegister("PCGAMGCoarse_GEO", PC_CLASSID, &PC_GAMGCoarsen_GEO);CHKERRQ(ierr);
1366   ierr = PetscLogEventRegister("PCGAMGProl_AGG", PC_CLASSID, &PC_GAMGProlongator_AGG);CHKERRQ(ierr);
1367   ierr = PetscLogEventRegister("PCGAMGProl_GEO", PC_CLASSID, &PC_GAMGProlongator_GEO);CHKERRQ(ierr);
1368   ierr = PetscLogEventRegister("PCGAMGPOpt_AGG", PC_CLASSID, &PC_GAMGOptProlongator_AGG);CHKERRQ(ierr);
1369 
1370 #if defined PETSC_GAMG_USE_LOG
1371   ierr = PetscLogEventRegister("GAMG: createProl", PC_CLASSID, &petsc_gamg_setup_events[SET1]);CHKERRQ(ierr);
1372   ierr = PetscLogEventRegister("  Graph", PC_CLASSID, &petsc_gamg_setup_events[GRAPH]);CHKERRQ(ierr);
1373   /* PetscLogEventRegister("    G.Mat", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_MAT]); */
1374   /* PetscLogEventRegister("    G.Filter", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_FILTER]); */
1375   /* PetscLogEventRegister("    G.Square", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_SQR]); */
1376   ierr = PetscLogEventRegister("  MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[SET4]);CHKERRQ(ierr);
1377   ierr = PetscLogEventRegister("  geo: growSupp", PC_CLASSID, &petsc_gamg_setup_events[SET5]);CHKERRQ(ierr);
1378   ierr = PetscLogEventRegister("  geo: triangle", PC_CLASSID, &petsc_gamg_setup_events[SET6]);CHKERRQ(ierr);
1379   ierr = PetscLogEventRegister("    search-set", PC_CLASSID, &petsc_gamg_setup_events[FIND_V]);CHKERRQ(ierr);
1380   ierr = PetscLogEventRegister("  SA: col data", PC_CLASSID, &petsc_gamg_setup_events[SET7]);CHKERRQ(ierr);
1381   ierr = PetscLogEventRegister("  SA: frmProl0", PC_CLASSID, &petsc_gamg_setup_events[SET8]);CHKERRQ(ierr);
1382   ierr = PetscLogEventRegister("  SA: smooth", PC_CLASSID, &petsc_gamg_setup_events[SET9]);CHKERRQ(ierr);
1383   ierr = PetscLogEventRegister("GAMG: partLevel", PC_CLASSID, &petsc_gamg_setup_events[SET2]);CHKERRQ(ierr);
1384   ierr = PetscLogEventRegister("  repartition", PC_CLASSID, &petsc_gamg_setup_events[SET12]);CHKERRQ(ierr);
1385   ierr = PetscLogEventRegister("  Invert-Sort", PC_CLASSID, &petsc_gamg_setup_events[SET13]);CHKERRQ(ierr);
1386   ierr = PetscLogEventRegister("  Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14]);CHKERRQ(ierr);
1387   ierr = PetscLogEventRegister("  Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15]);CHKERRQ(ierr);
1388 
1389   /* PetscLogEventRegister(" PL move data", PC_CLASSID, &petsc_gamg_setup_events[SET13]); */
1390   /* PetscLogEventRegister("GAMG: fix", PC_CLASSID, &petsc_gamg_setup_events[SET10]); */
1391   /* PetscLogEventRegister("GAMG: set levels", PC_CLASSID, &petsc_gamg_setup_events[SET11]); */
1392   /* create timer stages */
1393 #if defined GAMG_STAGES
1394   {
1395     char     str[32];
1396     PetscInt lidx;
1397     sprintf(str,"MG Level %d (finest)",0);
1398     ierr = PetscLogStageRegister(str, &gamg_stages[0]);CHKERRQ(ierr);
1399     for (lidx=1; lidx<PETSC_GAMG_MAXLEVELS; lidx++) {
1400       sprintf(str,"MG Level %d",(int)lidx);
1401       ierr = PetscLogStageRegister(str, &gamg_stages[lidx]);CHKERRQ(ierr);
1402     }
1403   }
1404 #endif
1405 #endif
1406   PetscFunctionReturn(0);
1407 }
1408 
1409 /*@C
1410  PCGAMGFinalizePackage - This function frees everything from the PCGAMG package. It is
1411     called from PetscFinalize() automatically.
1412 
1413  Level: developer
1414 
1415  .keywords: PC, PCGAMG, finalize, package
1416  .seealso: PetscFinalize()
1417 @*/
1418 PetscErrorCode PCGAMGFinalizePackage(void)
1419 {
1420   PetscErrorCode ierr;
1421 
1422   PetscFunctionBegin;
1423   PCGAMGPackageInitialized = PETSC_FALSE;
1424   ierr = PetscFunctionListDestroy(&GAMGList);CHKERRQ(ierr);
1425   PetscFunctionReturn(0);
1426 }
1427 
1428 /*@C
1429  PCGAMGRegister - Register a PCGAMG implementation.
1430 
1431  Input Parameters:
1432  + type - string that will be used as the name of the GAMG type.
1433  - create - function for creating the GAMG context.
1434 
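 Sample usage (a sketch; MyGAMGCreate() stands for a hypothetical user-provided creation routine):
.vb
   PCGAMGRegister("my_gamg",MyGAMGCreate);
.ve
 The new type can then be selected with PCGAMGSetType(pc,"my_gamg") or the option -pc_gamg_type my_gamg.
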
1435   Level: advanced
1436 
1437  .seealso: PCGAMGType, PCGAMG, PCGAMGSetType()
1438 @*/
1439 PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
1440 {
1441   PetscErrorCode ierr;
1442 
1443   PetscFunctionBegin;
1444   ierr = PCGAMGInitializePackage();CHKERRQ(ierr);
1445   ierr = PetscFunctionListAdd(&GAMGList,type,create);CHKERRQ(ierr);
1446   PetscFunctionReturn(0);
1447 }
1448 
1449