// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.

//                        libCEED + PETSc Example: CEED BPs 3-6 with Multigrid
//
// This example demonstrates a simple usage of libCEED with PETSc to solve the
// CEED BP benchmark problems; see http://ceed.exascaleproject.org/bps.
//
// The code uses the higher-level communication protocols in DMPlex.
//
// Build with:
//
//     make multigrid [PETSC_DIR=</path/to/petsc>] [CEED_DIR=</path/to/libceed>]
//
// Sample runs:
//
//     multigrid -problem bp3
//     multigrid -problem bp4 -ceed /cpu/self
//     multigrid -problem bp5 -ceed /cpu/occa
//     multigrid -problem bp6 -ceed /gpu/cuda
//
//TESTARGS -ceed {ceed_resource} -test -problem bp3 -degree 3

/// @file
/// CEED BPs 3-6 multigrid example using PETSc
const char help[] = "Solve CEED BPs using p-multigrid with PETSc and DMPlex\n";

#define multigrid
#include "setup.h"
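// Note: defining the bare `multigrid` macro before including setup.h is what
// selects the multigrid-specific declarations in the setup header shared by
// these examples (presumably via #ifdef multigrid guards inside setup.h).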

int main(int argc, char **argv) {
  PetscErrorCode ierr;
  MPI_Comm comm;
  char filename[PETSC_MAX_PATH_LEN],
       ceedresource[PETSC_MAX_PATH_LEN] = "/cpu/self";
  double my_rt_start, my_rt, rt_min, rt_max;
  PetscInt degree = 3, qextra, *lsize, *xlsize, *gsize, dim = 3,
           melem[3] = {3, 3, 3}, ncompu = 1, numlevels = degree, *leveldegrees;
  PetscScalar *r;
  PetscBool test_mode, benchmark_mode, read_mesh, write_solution;
  DM  *dm, dmOrig;
  SNES snes_dummy;
  KSP ksp;
  PC pc;
  Mat *matO, *matI, *matR, matCoarse;
  Vec *X, *Xloc, *mult, rhs, rhsloc;
  UserO *userO;
  UserIR *userI, *userR;
  Ceed ceed;
  CeedData *ceeddata;
  CeedVector rhsceed, diagceed, target;
  CeedQFunction qf_error, qf_restrict, qf_prolong;
  CeedOperator op_error;
  bpType bpChoice;
  coarsenType coarsen;

  ierr = PetscInitialize(&argc, &argv, NULL, help);
  if (ierr) return ierr;
  comm = PETSC_COMM_WORLD;

  // Parse command line options
  ierr = PetscOptionsBegin(comm, NULL, "CEED BPs in PETSc", NULL); CHKERRQ(ierr);
  bpChoice = CEED_BP3;
  ierr = PetscOptionsEnum("-problem",
                          "CEED benchmark problem to solve", NULL,
                          bpTypes, (PetscEnum)bpChoice, (PetscEnum *)&bpChoice,
                          NULL); CHKERRQ(ierr);
  ncompu = bpOptions[bpChoice].ncompu;
  test_mode = PETSC_FALSE;
  ierr = PetscOptionsBool("-test",
                          "Testing mode (do not print unless error is large)",
                          NULL, test_mode, &test_mode, NULL); CHKERRQ(ierr);
  benchmark_mode = PETSC_FALSE;
  ierr = PetscOptionsBool("-benchmark",
                          "Benchmarking mode (prints benchmark statistics)",
                          NULL, benchmark_mode, &benchmark_mode, NULL);
  CHKERRQ(ierr);
  write_solution = PETSC_FALSE;
  ierr = PetscOptionsBool("-write_solution",
                          "Write solution for visualization",
                          NULL, write_solution, &write_solution, NULL);
  CHKERRQ(ierr);
  degree = test_mode ? 3 : 2;
  ierr = PetscOptionsInt("-degree", "Polynomial degree of tensor product basis",
                         NULL, degree, &degree, NULL); CHKERRQ(ierr);
  if (degree < 1) SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE,
                             "-degree %D must be at least 1", degree);
  qextra = bpOptions[bpChoice].qextra;
  ierr = PetscOptionsInt("-qextra", "Number of extra quadrature points",
                         NULL, qextra, &qextra, NULL); CHKERRQ(ierr);
  ierr = PetscOptionsString("-ceed", "CEED resource specifier",
                            NULL, ceedresource, ceedresource,
                            sizeof(ceedresource), NULL); CHKERRQ(ierr);
  coarsen = COARSEN_UNIFORM;
  ierr = PetscOptionsEnum("-coarsen",
                          "Coarsening strategy to use", NULL,
                          coarsenTypes, (PetscEnum)coarsen,
                          (PetscEnum *)&coarsen, NULL); CHKERRQ(ierr);
  read_mesh = PETSC_FALSE;
  ierr = PetscOptionsString("-mesh", "Read mesh from file", NULL,
                            filename, filename, sizeof(filename), &read_mesh);
  CHKERRQ(ierr);
  if (!read_mesh) {
    PetscInt tmp = dim;
    ierr = PetscOptionsIntArray("-cells","Number of cells per dimension", NULL,
                                melem, &tmp, NULL); CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd(); CHKERRQ(ierr);
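  // As an illustration, a run combining these options might look like the
  // following (the exact -coarsen value spellings depend on the coarsenTypes
  // strings defined in setup.h):
  //
  //   mpiexec -n 4 ./multigrid -problem bp4 -degree 4 -coarsen logarithmic \
  //     -cells 4,4,4 -ceed /cpu/self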

  // Setup DM
  if (read_mesh) {
    ierr = DMPlexCreateFromFile(PETSC_COMM_WORLD, filename, PETSC_TRUE, &dmOrig);
    CHKERRQ(ierr);
  } else {
    ierr = DMPlexCreateBoxMesh(PETSC_COMM_WORLD, dim, PETSC_FALSE, melem, NULL,
                               NULL, NULL, PETSC_TRUE, &dmOrig); CHKERRQ(ierr);
  }

  {
    DM dmDist = NULL;
    PetscPartitioner part;

    ierr = DMPlexGetPartitioner(dmOrig, &part); CHKERRQ(ierr);
    ierr = PetscPartitionerSetFromOptions(part); CHKERRQ(ierr);
    ierr = DMPlexDistribute(dmOrig, 0, NULL, &dmDist); CHKERRQ(ierr);
    if (dmDist) {
      ierr = DMDestroy(&dmOrig); CHKERRQ(ierr);
      dmOrig = dmDist;
    }
  }
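  // DMPlexDistribute with overlap 0 partitions the mesh across the ranks of
  // the communicator; when no distribution is needed (e.g., on a single rank)
  // it returns NULL and the original DM is kept.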

  // Allocate arrays for PETSc objects for each level
  switch (coarsen) {
  case COARSEN_UNIFORM:
    numlevels = degree;
    break;
  case COARSEN_LOGARITHMIC:
    numlevels = ceil(log(degree)/log(2)) + 1;
    break;
  }
  ierr = PetscMalloc1(numlevels, &leveldegrees); CHKERRQ(ierr);
  switch (coarsen) {
  case COARSEN_UNIFORM:
    for (int i=0; i<numlevels; i++) leveldegrees[i] = i + 1;
    break;
  case COARSEN_LOGARITHMIC:
    for (int i=0; i<numlevels-1; i++) leveldegrees[i] = pow(2,i);
    leveldegrees[numlevels-1] = degree;
    break;
  }
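  // For example, with -degree 8 uniform coarsening gives 8 levels with
  // degrees {1, 2, 3, 4, 5, 6, 7, 8}, while logarithmic coarsening gives
  // ceil(log2(8)) + 1 = 4 levels with degrees {1, 2, 4, 8}; a non power of
  // two such as -degree 5 yields {1, 2, 4, 5}.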
  ierr = PetscMalloc1(numlevels, &dm); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &X); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &Xloc); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &mult); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &userO); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &userI); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &userR); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &matO); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &matI); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &matR); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &lsize); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &xlsize); CHKERRQ(ierr);
  ierr = PetscMalloc1(numlevels, &gsize); CHKERRQ(ierr);

  // Setup DM and Operator Mat Shells for each level
  for (CeedInt i=0; i<numlevels; i++) {
    // Create DM
    ierr = DMClone(dmOrig, &dm[i]); CHKERRQ(ierr);
    ierr = SetupDMByDegree(dm[i], leveldegrees[i], ncompu, bpChoice);
    CHKERRQ(ierr);

    // Create vectors
    ierr = DMCreateGlobalVector(dm[i], &X[i]); CHKERRQ(ierr);
    ierr = VecGetLocalSize(X[i], &lsize[i]); CHKERRQ(ierr);
    ierr = VecGetSize(X[i], &gsize[i]); CHKERRQ(ierr);
    ierr = DMCreateLocalVector(dm[i], &Xloc[i]); CHKERRQ(ierr);
    ierr = VecGetSize(Xloc[i], &xlsize[i]); CHKERRQ(ierr);

    // Operator
    ierr = PetscMalloc1(1, &userO[i]); CHKERRQ(ierr);
    ierr = MatCreateShell(comm, lsize[i], lsize[i], gsize[i], gsize[i],
                          userO[i], &matO[i]); CHKERRQ(ierr);
    ierr = MatShellSetOperation(matO[i], MATOP_MULT,
                                (void(*)(void))MatMult_Ceed); CHKERRQ(ierr);
    ierr = MatShellSetOperation(matO[i], MATOP_GET_DIAGONAL,
                                (void(*)(void))MatGetDiag); CHKERRQ(ierr);

    // Level transfers
    if (i > 0) {
      // Interp
      ierr = PetscMalloc1(1, &userI[i]); CHKERRQ(ierr);
      ierr = MatCreateShell(comm, lsize[i], lsize[i-1], gsize[i], gsize[i-1],
                            userI[i], &matI[i]); CHKERRQ(ierr);
      ierr = MatShellSetOperation(matI[i], MATOP_MULT,
                                  (void(*)(void))MatMult_Interp);
      CHKERRQ(ierr);

      // Restrict
      ierr = PetscMalloc1(1, &userR[i]); CHKERRQ(ierr);
      ierr = MatCreateShell(comm, lsize[i-1], lsize[i], gsize[i-1], gsize[i],
                            userR[i], &matR[i]); CHKERRQ(ierr);
      ierr = MatShellSetOperation(matR[i], MATOP_MULT,
                                  (void(*)(void))MatMult_Restrict);
      CHKERRQ(ierr);
    }
  }
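  // The shell matrices apply the libCEED operators matrix-free; a rough
  // sketch of the MatMult_Ceed callback (the actual implementation lives in
  // setup.h) follows the usual libCEED + PETSc pattern:
  //
  //   DMGlobalToLocalBegin/End(dm, X, INSERT_VALUES, Xloc);
  //   VecGetArrayRead(Xloc, &x); VecGetArray(Yloc, &y);
  //   CeedVectorSetArray(xceed, CEED_MEM_HOST, CEED_USE_POINTER, (PetscScalar *)x);
  //   CeedVectorSetArray(yceed, CEED_MEM_HOST, CEED_USE_POINTER, y);
  //   CeedOperatorApply(op, xceed, yceed, CEED_REQUEST_IMMEDIATE);
  //   ... restore the arrays, then DMLocalToGlobalBegin/End(dm, Yloc, ADD_VALUES, Y);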
  ierr = VecDuplicate(X[numlevels-1], &rhs); CHKERRQ(ierr);

  // Set up libCEED
  CeedInit(ceedresource, &ceed);

  // Print global grid information
  if (!test_mode) {
    PetscInt P = degree + 1, Q = P + qextra;
    const char *usedresource;
    CeedGetResource(ceed, &usedresource);
    ierr = PetscPrintf(comm,
                       "\n-- CEED Benchmark Problem %d -- libCEED + PETSc + PCMG --\n"
                       "  libCEED:\n"
                       "    libCEED Backend                    : %s\n"
                       "  Mesh:\n"
                       "    Number of 1D Basis Nodes (p)       : %d\n"
                       "    Number of 1D Quadrature Points (q) : %d\n"
                       "    Global Nodes                       : %D\n"
                       "    Owned Nodes                        : %D\n"
                       "    DoF per node                       : %D\n"
                       "  Multigrid:\n"
                       "    Number of Levels                   : %d\n",
                       bpChoice+1, usedresource, P, Q,
                       gsize[numlevels-1]/ncompu, lsize[numlevels-1]/ncompu,
                       ncompu, numlevels); CHKERRQ(ierr);
  }

  // Create RHS vector
  ierr = VecDuplicate(Xloc[numlevels-1], &rhsloc); CHKERRQ(ierr);
  ierr = VecZeroEntries(rhsloc); CHKERRQ(ierr);
  ierr = VecGetArray(rhsloc, &r); CHKERRQ(ierr);
  CeedVectorCreate(ceed, xlsize[numlevels-1], &rhsceed);
  CeedVectorSetArray(rhsceed, CEED_MEM_HOST, CEED_USE_POINTER, r);
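  // With CEED_USE_POINTER the CeedVector wraps the PETSc array in place, so
  // the fine-level RHS assembled by SetupLibceedByDegree below lands directly
  // in rhsloc without a copy.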

  // Set up libCEED operators on each level
  ierr = PetscMalloc1(numlevels, &ceeddata); CHKERRQ(ierr);
  for (int i=0; i<numlevels; i++) {
    // Print level information
    if (!test_mode && (i == 0 || i == numlevels-1)) {
      ierr = PetscPrintf(comm,"    Level %D (%s):\n"
                         "      Number of 1D Basis Nodes (p)     : %d\n"
                         "      Global Nodes                     : %D\n"
                         "      Owned Nodes                      : %D\n",
                         i, (i? "fine" : "coarse"), leveldegrees[i] + 1,
                         gsize[i]/ncompu, lsize[i]/ncompu); CHKERRQ(ierr);
    }
    ierr = PetscMalloc1(1, &ceeddata[i]); CHKERRQ(ierr);
    ierr = SetupLibceedByDegree(dm[i], ceed, leveldegrees[i], dim, qextra,
                                ncompu, gsize[i], xlsize[i], bpChoice,
                                ceeddata[i], i==(numlevels-1), rhsceed,
                                &target); CHKERRQ(ierr);
  }

  // Gather RHS
  ierr = VecRestoreArray(rhsloc, &r); CHKERRQ(ierr);
  ierr = VecZeroEntries(rhs); CHKERRQ(ierr);
  ierr = DMLocalToGlobalBegin(dm[numlevels-1], rhsloc, ADD_VALUES, rhs);
  CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(dm[numlevels-1], rhsloc, ADD_VALUES, rhs);
  CHKERRQ(ierr);
  CeedVectorDestroy(&rhsceed);

  // Create the restriction/interpolation Q-function
  CeedQFunctionCreateIdentity(ceed, ncompu, CEED_EVAL_NONE, CEED_EVAL_INTERP,
                              &qf_restrict);
  CeedQFunctionCreateIdentity(ceed, ncompu, CEED_EVAL_INTERP, CEED_EVAL_NONE,
                              &qf_prolong);
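  // These identity Q-functions pass values through unchanged; the actual
  // coarse-to-fine and fine-to-coarse action comes from the bases that
  // CeedLevelTransferSetup attaches below, with the CEED_EVAL_INTERP /
  // CEED_EVAL_NONE modes selecting on which side the basis interpolation is
  // applied.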

  // Set up libCEED level transfer operators
  ierr = CeedLevelTransferSetup(ceed, numlevels, ncompu, bpChoice, ceeddata,
                                leveldegrees, qf_restrict, qf_prolong);
  CHKERRQ(ierr);

  // Create the error Q-function
  CeedQFunctionCreateInterior(ceed, 1, bpOptions[bpChoice].error,
                              bpOptions[bpChoice].errorfname, &qf_error);
  CeedQFunctionAddInput(qf_error, "u", ncompu, CEED_EVAL_INTERP);
  CeedQFunctionAddInput(qf_error, "true_soln", ncompu, CEED_EVAL_NONE);
  CeedQFunctionAddOutput(qf_error, "error", ncompu, CEED_EVAL_NONE);

  // Create the error operator
  CeedOperatorCreate(ceed, qf_error, CEED_QFUNCTION_NONE, CEED_QFUNCTION_NONE,
                     &op_error);
  CeedOperatorSetField(op_error, "u", ceeddata[numlevels-1]->Erestrictu,
                       ceeddata[numlevels-1]->basisu, CEED_VECTOR_ACTIVE);
  CeedOperatorSetField(op_error, "true_soln", ceeddata[numlevels-1]->Erestrictui,
                       CEED_BASIS_COLLOCATED, target);
  CeedOperatorSetField(op_error, "error", ceeddata[numlevels-1]->Erestrictui,
                       CEED_BASIS_COLLOCATED, CEED_VECTOR_ACTIVE);
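  // The error operator interpolates the active solution u to quadrature
  // points, pairs it with the collocated true solution stored in target, and
  // emits the pointwise error; ComputeErrorMax reduces this to a max norm
  // after the solve.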

  // Calculate multiplicity
  for (int i=0; i<numlevels; i++) {
    PetscScalar *x;

    // CEED vector
    ierr = VecGetArray(Xloc[i], &x); CHKERRQ(ierr);
    CeedVectorSetArray(ceeddata[i]->xceed, CEED_MEM_HOST, CEED_USE_POINTER, x);

    // Multiplicity
    CeedElemRestrictionGetMultiplicity(ceeddata[i]->Erestrictu,
                                       ceeddata[i]->xceed);

    // Restore vector
    ierr = VecRestoreArray(Xloc[i], &x); CHKERRQ(ierr);

    // Create mult vector
    ierr = VecDuplicate(Xloc[i], &mult[i]); CHKERRQ(ierr);

    // Local-to-global
    ierr = VecZeroEntries(X[i]); CHKERRQ(ierr);
    ierr = DMLocalToGlobalBegin(dm[i], Xloc[i], ADD_VALUES, X[i]);
    CHKERRQ(ierr);
    ierr = DMLocalToGlobalEnd(dm[i], Xloc[i], ADD_VALUES, X[i]);
    CHKERRQ(ierr);
    ierr = VecZeroEntries(Xloc[i]); CHKERRQ(ierr);

    // Global-to-local
    ierr = DMGlobalToLocalBegin(dm[i], X[i], INSERT_VALUES, mult[i]);
    CHKERRQ(ierr);
    ierr = DMGlobalToLocalEnd(dm[i], X[i], INSERT_VALUES, mult[i]);
    CHKERRQ(ierr);
    ierr = VecZeroEntries(X[i]); CHKERRQ(ierr);

    // Multiplicity scaling
    ierr = VecReciprocal(mult[i]); CHKERRQ(ierr);
  }
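  // Each mult[i] now holds 1/multiplicity at every local node: a node shared
  // by, say, four elements carries the weight 1/4, so the level transfer
  // operators can average rather than sum the contributions of duplicated
  // nodes.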

  // Set up Mat
  for (int i=0; i<numlevels; i++) {
    // User Operator
    userO[i]->comm = comm;
    userO[i]->dm = dm[i];
    userO[i]->Xloc = Xloc[i];
    ierr = VecDuplicate(Xloc[i], &userO[i]->Yloc); CHKERRQ(ierr);
    userO[i]->xceed = ceeddata[i]->xceed;
    userO[i]->yceed = ceeddata[i]->yceed;
    userO[i]->op = ceeddata[i]->op_apply;
    userO[i]->ceed = ceed;

    // Set up diagonal
    const CeedScalar *ceedarray;
    ierr = VecDuplicate(X[i], &userO[i]->diag); CHKERRQ(ierr);

    // -- Local diagonal
    CeedOperatorAssembleLinearDiagonal(userO[i]->op, &diagceed,
                                       CEED_REQUEST_IMMEDIATE);

    // -- Set PETSc array
    CeedVectorGetArrayRead(diagceed, CEED_MEM_HOST, &ceedarray);
    ierr = VecPlaceArray(Xloc[i], ceedarray); CHKERRQ(ierr);
    CeedVectorRestoreArrayRead(diagceed, &ceedarray);

    // -- Global diagonal
    ierr = VecZeroEntries(userO[i]->diag); CHKERRQ(ierr);
    ierr = DMLocalToGlobalBegin(userO[i]->dm, Xloc[i], ADD_VALUES,
                                userO[i]->diag); CHKERRQ(ierr);
    ierr = DMLocalToGlobalEnd(userO[i]->dm, Xloc[i], ADD_VALUES,
                              userO[i]->diag); CHKERRQ(ierr);

    // -- Cleanup
    ierr = VecResetArray(Xloc[i]); CHKERRQ(ierr);
    CeedVectorDestroy(&diagceed);
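    // The diagonal assembled by libCEED is a local (L-)vector; summing it
    // into userO[i]->diag with ADD_VALUES accumulates the contributions of
    // shared nodes, and MatGetDiag serves this global diagonal to the
    // PCJACOBI smoothers configured below.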

    if (i > 0) {
      // Interp Operator
      userI[i]->comm = comm;
      userI[i]->dmc = dm[i-1];
      userI[i]->dmf = dm[i];
      userI[i]->Xloc = Xloc[i-1];
      userI[i]->Yloc = userO[i]->Yloc;
      userI[i]->mult = mult[i];
      userI[i]->ceedvecc = userO[i-1]->xceed;
      userI[i]->ceedvecf = userO[i]->yceed;
      userI[i]->op = ceeddata[i]->op_interp;
      userI[i]->ceed = ceed;

      // Restrict Operator
      userR[i]->comm = comm;
      userR[i]->dmc = dm[i-1];
      userR[i]->dmf = dm[i];
      userR[i]->Xloc = Xloc[i];
      userR[i]->Yloc = userO[i-1]->Yloc;
      userR[i]->mult = mult[i];
      userR[i]->ceedvecf = userO[i]->xceed;
      userR[i]->ceedvecc = userO[i-1]->yceed;
      userR[i]->op = ceeddata[i]->op_restrict;
      userR[i]->ceed = ceed;
    }
  }

  // Setup dummy SNES for AMG coarse solve
  ierr = SNESCreate(comm, &snes_dummy); CHKERRQ(ierr);
  ierr = SNESSetDM(snes_dummy, dm[0]); CHKERRQ(ierr);
  ierr = SNESSetSolution(snes_dummy, X[0]); CHKERRQ(ierr);

  // -- Jacobian matrix
  ierr = DMSetMatType(dm[0], MATAIJ); CHKERRQ(ierr);
  ierr = DMCreateMatrix(dm[0], &matCoarse); CHKERRQ(ierr);
  ierr = SNESSetJacobian(snes_dummy, matCoarse, matCoarse, NULL,
                         NULL); CHKERRQ(ierr);

  // -- Residual evaluation function
  ierr = SNESSetFunction(snes_dummy, X[0], FormResidual_Ceed,
                         userO[0]); CHKERRQ(ierr);

  // -- Form Jacobian
  ierr = SNESComputeJacobianDefaultColor(snes_dummy, X[0], matO[0],
                                         matCoarse, NULL); CHKERRQ(ierr);
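  // SNESComputeJacobianDefaultColor drives the residual function with colored
  // finite-difference probes to assemble an explicit AIJ copy of the
  // matrix-free coarse operator into matCoarse, which the PCGAMG coarse solve
  // below requires.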

  // Set up KSP
  ierr = KSPCreate(comm, &ksp); CHKERRQ(ierr);
  {
    ierr = KSPSetType(ksp, KSPCG); CHKERRQ(ierr);
    ierr = KSPSetNormType(ksp, KSP_NORM_NATURAL); CHKERRQ(ierr);
    ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT,
                            PETSC_DEFAULT); CHKERRQ(ierr);
  }
  ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp, matO[numlevels-1], matO[numlevels-1]);
  CHKERRQ(ierr);

  // Set up PCMG
  ierr = KSPGetPC(ksp, &pc); CHKERRQ(ierr);
  PCMGCycleType pcmgcycletype = PC_MG_CYCLE_V;
  {
    ierr = PCSetType(pc, PCMG); CHKERRQ(ierr);

    // PCMG levels
    ierr = PCMGSetLevels(pc, numlevels, NULL); CHKERRQ(ierr);
    for (int i=0; i<numlevels; i++) {
      // Smoother
      KSP smoother;
      PC smoother_pc;
      ierr = PCMGGetSmoother(pc, i, &smoother); CHKERRQ(ierr);
      ierr = KSPSetType(smoother, KSPCHEBYSHEV); CHKERRQ(ierr);
      ierr = KSPChebyshevEstEigSet(smoother, 0, 0.1, 0, 1.1); CHKERRQ(ierr);
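      // KSPChebyshevEstEigSet(ksp, a, b, c, d) sets the Chebyshev target
      // interval to [a*emin + b*emax, c*emin + d*emax] from the estimated
      // extreme eigenvalues, so (0, 0.1, 0, 1.1) targets [0.1*emax,
      // 1.1*emax], the upper part of the spectrum that smoothing must damp.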
      ierr = KSPChebyshevEstEigSetUseNoisy(smoother, PETSC_TRUE); CHKERRQ(ierr);
      ierr = KSPSetOperators(smoother, matO[i], matO[i]); CHKERRQ(ierr);
      ierr = KSPGetPC(smoother, &smoother_pc); CHKERRQ(ierr);
      ierr = PCSetType(smoother_pc, PCJACOBI); CHKERRQ(ierr);
      ierr = PCJacobiSetType(smoother_pc, PC_JACOBI_DIAGONAL); CHKERRQ(ierr);

      // Work vector
      if (i < numlevels-1) {
        ierr = PCMGSetX(pc, i, X[i]); CHKERRQ(ierr);
      }

      // Level transfers
      if (i > 0) {
        // Interpolation
        ierr = PCMGSetInterpolation(pc, i, matI[i]); CHKERRQ(ierr);

        // Restriction
        ierr = PCMGSetRestriction(pc, i, matR[i]); CHKERRQ(ierr);
      }
    }

    // Coarse solve
    KSP coarse;
    PC coarse_pc;
    ierr = PCMGGetCoarseSolve(pc, &coarse); CHKERRQ(ierr);
    ierr = KSPSetType(coarse, KSPPREONLY); CHKERRQ(ierr);
    ierr = KSPSetOperators(coarse, matCoarse, matCoarse); CHKERRQ(ierr);

    ierr = KSPGetPC(coarse, &coarse_pc); CHKERRQ(ierr);
    ierr = PCSetType(coarse_pc, PCGAMG); CHKERRQ(ierr);

    ierr = KSPSetOptionsPrefix(coarse, "coarse_"); CHKERRQ(ierr);
    ierr = PCSetOptionsPrefix(coarse_pc, "coarse_"); CHKERRQ(ierr);
    ierr = KSPSetFromOptions(coarse); CHKERRQ(ierr);
    ierr = PCSetFromOptions(coarse_pc); CHKERRQ(ierr);

    // PCMG options
    ierr = PCMGSetType(pc, PC_MG_MULTIPLICATIVE); CHKERRQ(ierr);
    ierr = PCMGSetNumberSmooth(pc, 3); CHKERRQ(ierr);
    ierr = PCMGSetCycleType(pc, pcmgcycletype); CHKERRQ(ierr);
  }

  // First run, if benchmarking
  if (benchmark_mode) {
    ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 1);
    CHKERRQ(ierr);
    ierr = VecZeroEntries(X[numlevels-1]); CHKERRQ(ierr);
    my_rt_start = MPI_Wtime();
    ierr = KSPSolve(ksp, rhs, X[numlevels-1]); CHKERRQ(ierr);
    my_rt = MPI_Wtime() - my_rt_start;
    ierr = MPI_Allreduce(MPI_IN_PLACE, &my_rt, 1, MPI_DOUBLE, MPI_MIN, comm);
    CHKERRQ(ierr);
    // Set maxits based on first iteration timing
    if (my_rt > 0.02) {
      ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 5);
      CHKERRQ(ierr);
    } else {
      ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 20);
      CHKERRQ(ierr);
    }
  }

  // Timed solve
  ierr = VecZeroEntries(X[numlevels-1]); CHKERRQ(ierr);
  ierr = PetscBarrier((PetscObject)ksp); CHKERRQ(ierr);
  my_rt_start = MPI_Wtime();
  ierr = KSPSolve(ksp, rhs, X[numlevels-1]); CHKERRQ(ierr);
  my_rt = MPI_Wtime() - my_rt_start;

  // Output results
  {
    KSPType ksptype;
    PCMGType pcmgtype;
    KSPConvergedReason reason;
    PetscReal rnorm;
    PetscInt its;
    ierr = KSPGetType(ksp, &ksptype); CHKERRQ(ierr);
    ierr = KSPGetConvergedReason(ksp, &reason); CHKERRQ(ierr);
    ierr = KSPGetIterationNumber(ksp, &its); CHKERRQ(ierr);
    ierr = KSPGetResidualNorm(ksp, &rnorm); CHKERRQ(ierr);
    ierr = PCMGGetType(pc, &pcmgtype); CHKERRQ(ierr);
    if (!test_mode || reason < 0 || rnorm > 1e-8) {
      ierr = PetscPrintf(comm,
                         "  KSP:\n"
                         "    KSP Type                           : %s\n"
                         "    KSP Convergence                    : %s\n"
                         "    Total KSP Iterations               : %D\n"
                         "    Final rnorm                        : %e\n",
                         ksptype, KSPConvergedReasons[reason], its,
                         (double)rnorm); CHKERRQ(ierr);
      ierr = PetscPrintf(comm,
                         "  PCMG:\n"
                         "    PCMG Type                          : %s\n"
                         "    PCMG Cycle Type                    : %s\n",
                         PCMGTypes[pcmgtype],
                         PCMGCycleTypes[pcmgcycletype]); CHKERRQ(ierr);
    }
    if (!test_mode) {
      ierr = PetscPrintf(comm,"  Performance:\n"); CHKERRQ(ierr);
    }
    {
      PetscReal maxerror;
      ierr = ComputeErrorMax(userO[numlevels-1], op_error, X[numlevels-1], target,
                             &maxerror); CHKERRQ(ierr);
      PetscReal tol = 5e-2;
      if (!test_mode || maxerror > tol) {
        ierr = MPI_Allreduce(&my_rt, &rt_min, 1, MPI_DOUBLE, MPI_MIN, comm);
        CHKERRQ(ierr);
        ierr = MPI_Allreduce(&my_rt, &rt_max, 1, MPI_DOUBLE, MPI_MAX, comm);
        CHKERRQ(ierr);
        ierr = PetscPrintf(comm,
                           "    Pointwise Error (max)              : %e\n"
                           "    CG Solve Time                      : %g (%g) sec\n",
                           (double)maxerror, rt_max, rt_min); CHKERRQ(ierr);
      }
    }
    if (benchmark_mode && (!test_mode)) {
      ierr = PetscPrintf(comm,
                         "    DoFs/Sec in CG                     : %g (%g) million\n",
                         1e-6*gsize[numlevels-1]*its/rt_max,
                         1e-6*gsize[numlevels-1]*its/rt_min);
      CHKERRQ(ierr);
    }
  }

  if (write_solution) {
    PetscViewer vtkviewersoln;

    ierr = PetscViewerCreate(comm, &vtkviewersoln); CHKERRQ(ierr);
    ierr = PetscViewerSetType(vtkviewersoln, PETSCVIEWERVTK); CHKERRQ(ierr);
    ierr = PetscViewerFileSetName(vtkviewersoln, "solution.vtk"); CHKERRQ(ierr);
    ierr = VecView(X[numlevels-1], vtkviewersoln); CHKERRQ(ierr);
    ierr = PetscViewerDestroy(&vtkviewersoln); CHKERRQ(ierr);
  }

  // Cleanup
  for (int i=0; i<numlevels; i++) {
    ierr = VecDestroy(&X[i]); CHKERRQ(ierr);
    ierr = VecDestroy(&Xloc[i]); CHKERRQ(ierr);
    ierr = VecDestroy(&mult[i]); CHKERRQ(ierr);
    ierr = VecDestroy(&userO[i]->Yloc); CHKERRQ(ierr);
    ierr = VecDestroy(&userO[i]->diag); CHKERRQ(ierr);
    ierr = MatDestroy(&matO[i]); CHKERRQ(ierr);
    ierr = PetscFree(userO[i]); CHKERRQ(ierr);
    if (i > 0) {
      ierr = MatDestroy(&matI[i]); CHKERRQ(ierr);
      ierr = PetscFree(userI[i]); CHKERRQ(ierr);
      ierr = MatDestroy(&matR[i]); CHKERRQ(ierr);
      ierr = PetscFree(userR[i]); CHKERRQ(ierr);
    }
    ierr = CeedDataDestroy(i, ceeddata[i]); CHKERRQ(ierr);
    ierr = DMDestroy(&dm[i]); CHKERRQ(ierr);
  }
  ierr = PetscFree(leveldegrees); CHKERRQ(ierr);
  ierr = PetscFree(dm); CHKERRQ(ierr);
  ierr = PetscFree(X); CHKERRQ(ierr);
  ierr = PetscFree(Xloc); CHKERRQ(ierr);
  ierr = PetscFree(mult); CHKERRQ(ierr);
  ierr = PetscFree(matO); CHKERRQ(ierr);
  ierr = PetscFree(matI); CHKERRQ(ierr);
  ierr = PetscFree(matR); CHKERRQ(ierr);
  ierr = PetscFree(ceeddata); CHKERRQ(ierr);
  ierr = PetscFree(userO); CHKERRQ(ierr);
  ierr = PetscFree(userI); CHKERRQ(ierr);
  ierr = PetscFree(userR); CHKERRQ(ierr);
  ierr = PetscFree(lsize); CHKERRQ(ierr);
  ierr = PetscFree(xlsize); CHKERRQ(ierr);
  ierr = PetscFree(gsize); CHKERRQ(ierr);
  ierr = VecDestroy(&rhs); CHKERRQ(ierr);
  ierr = VecDestroy(&rhsloc); CHKERRQ(ierr);
  ierr = MatDestroy(&matCoarse); CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp); CHKERRQ(ierr);
  ierr = SNESDestroy(&snes_dummy); CHKERRQ(ierr);
  ierr = DMDestroy(&dmOrig); CHKERRQ(ierr);
  CeedVectorDestroy(&target);
  CeedQFunctionDestroy(&qf_error);
  CeedQFunctionDestroy(&qf_restrict);
  CeedQFunctionDestroy(&qf_prolong);
  CeedOperatorDestroy(&op_error);
  CeedDestroy(&ceed);
  return PetscFinalize();
}