static char help[] = "Stokes Problem discretized with finite elements,\n\
using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";

/*
For the isoviscous Stokes problem, which we discretize using the finite
element method on an unstructured mesh, the weak form equations are

  < \nabla v, \mu (\nabla u + {\nabla u}^T) > - < \nabla\cdot v, p > - < v, f > = 0
  < q, -\nabla\cdot u >                                                         = 0
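
These are the weak statement of the strong form

  -\nabla\cdot (\mu (\nabla u + {\nabla u}^T)) + \nabla p - f = 0
   \nabla\cdot u                                              = 0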

Viewing:

To produce nice output, use

  -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append

You can get a LaTeX view of the mesh, with point numbering, using

  -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0

The data layout can be viewed using

  -dm_petscsection_view

Lots of information about the FEM assembly can be printed using

  -dm_plex_print_fem 3
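
For example, a minimal run that checks the discretization error (the same
options used by the 2d_p2_p1_check test below) is

  ./ex62 -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001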
*/

#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>
#include <petscbag.h>

// TODO: Plot residual by fields after each smoother iterate

typedef enum {SOL_QUADRATIC, SOL_TRIG, SOL_UNKNOWN} SolType;
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
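/* The trailing "SolType", "SOL_", 0 entries follow the PetscOptionsEnum convention;
   only the first three strings name selectable solutions, which is why
   ProcessOptions() passes PETSC_STATIC_ARRAY_LENGTH(SolTypes)-3 as the list length */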

typedef struct {
  PetscScalar mu; /* dynamic shear viscosity */
} Parameter;

typedef struct {
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
} AppCtx;

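/* Pointwise residual functions: for each field f, PetscDS assembles the weak form
   \int_\Omega v . f0_f(u) + \nabla v : f1_f(u), so f1_u below supplies the stress
   term \mu (\nabla u + {\nabla u}^T) - p I that is tested against \nabla v */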
static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      f1[c*dim+d] = mu * (u_x[c*dim+d] + u_x[d*dim+c]);
    }
    f1[c*dim+c] -= u[uOff[1]];
  }
}

static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d*dim+d];
}

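/* Pointwise Jacobian functions: for test functions psi and basis functions phi,
   PetscDS assembles \int_\Omega psi g0 phi + psi g1 \nabla phi
                     + \nabla psi g2 phi + \nabla psi g3 \nabla phi */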
static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d*dim+d] = -1.0; /* < q, -\nabla\cdot u > */
}

static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* -< \nabla\cdot v, p > */
}

static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1]-uOff[0];
  PetscInt        c, d;

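  /* g3[((c*Nc + d)*dim + e)*dim + f] holds d f1[c*dim+e] / d u_x[d*dim+f] */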
  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      g3[((c*Nc+c)*dim+d)*dim+d] += mu; /* < \nabla v, \nabla u > */
      g3[((c*Nc+d)*dim+d)*dim+c] += mu; /* < \nabla v, {\nabla u}^T > */
    }
  }
}

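/* g0_pp assembles the pressure mass matrix scaled by 1/mu, which is spectrally
   equivalent to the Schur complement of the Stokes operator and is attached as
   its preconditioner via PetscDSSetJacobianPreconditioner() below */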
static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                  PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);

  g0[0] = 1.0/mu;
}

/* Quadratic MMS Solution
   2D:

     u = x^2 + y^2
     v = 2 x^2 - 2xy
     p = x + y - 1
     f = <1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 4x  4x \
                                  \ 4x -4x /
     div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 2x - 2x = 0

   3D:

     u = 2 x^2 + y^2 + z^2
     v = 2 x^2 - 2xy
     w = 2 x^2 - 2xz
     p = x + y + z - 3/2
     f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 8x  4x  4x \
                                  | 4x -4x  0  |
                                  \ 4x  0  -4x /
     div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 4x - 2x - 2x = 0
*/
static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;

  u[0] = (dim-1)*PetscSqr(x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSqr(x[c]);
    u[c]  = 2.0*PetscSqr(x[0]) - 2.0*x[0]*x[c];
  }
  return 0;
}

static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  u[0] = -0.5*dim;
  for (d = 0; d < dim; ++d) u[0] += x[d];
  return 0;
}

static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                           const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                           const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                           PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = (dim-1)*4.0*mu - 1.0;
  for (d = 1; d < dim; ++d) f0[d] = 4.0*mu - 1.0;
}

/* Trigonometric MMS Solution
   2D:

     u = sin(pi x) + sin(pi y)
     v = -pi cos(pi x) y
     p = sin(2 pi x) + sin(2 pi y)
     f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>

   so that

     e(u) = (grad u + grad u^T) = /        2pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y \
                                  \ pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)          /
     div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
     \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0

   3D:

     u = 2 sin(pi x) + sin(pi y) + sin(pi z)
     v = -pi cos(pi x) y
     w = -pi cos(pi x) z
     p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
     f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>

   so that

     e(u) = (grad u + grad u^T) = /        4pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
                                  | pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)                        0                  |
                                  \ pi cos(pi z) + pi^2 sin(pi x) z               0                         -2pi cos(pi x)            /
     div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
     \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
*/
static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;

  u[0] = (dim-1)*PetscSinReal(PETSC_PI*x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSinReal(PETSC_PI*x[c]);
    u[c]  = -PETSC_PI*PetscCosReal(PETSC_PI*x[0]) * x[c];
  }
  return 0;
}

static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt d;

  for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0*PETSC_PI*x[d]);
  return 0;
}

static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[0]) - (dim-1)*mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[0]);
  for (d = 1; d < dim; ++d) {
    f0[0] -= mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[d]);
    f0[d]  = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[d]) + mu*PetscPowRealInt(PETSC_PI, 3)*PetscCosReal(PETSC_PI*x[0])*x[d];
  }
}

static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt       sol;
  PetscErrorCode ierr;

  PetscFunctionBeginUser;
  options->sol = SOL_QUADRATIC;

  ierr = PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");PetscCall(ierr);
  sol  = options->sol;
  PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes)-3, SolTypes[options->sol], &sol, NULL));
  options->sol = (SolType) sol;
  ierr = PetscOptionsEnd();PetscCall(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscFunctionBeginUser;
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  PetscFunctionReturn(0);
}

static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
{
  Parameter     *p;

  PetscFunctionBeginUser;
  /* setup PETSc parameter bag */
  PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
  PetscCall(PetscBagGetData(ctx->bag, (void **) &p));
  PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
  PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
  PetscCall(PetscBagSetFromOptions(ctx->bag));
  {
    PetscViewer       viewer;
    PetscViewerFormat format;
    PetscBool         flg;

    PetscCall(PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
    if (flg) {
      PetscCall(PetscViewerPushFormat(viewer, format));
      PetscCall(PetscBagView(ctx->bag, viewer));
      PetscCall(PetscViewerFlush(viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
{
  PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
  PetscDS          ds;
  DMLabel          label;
  const PetscInt   id = 1;

  PetscFunctionBeginUser;
  PetscCall(DMGetDS(dm, &ds));
  switch (user->sol) {
    case SOL_QUADRATIC:
      PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
      exactFuncs[0] = quadratic_u;
      exactFuncs[1] = quadratic_p;
      break;
    case SOL_TRIG:
      PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
      exactFuncs[0] = trig_u;
      exactFuncs[1] = trig_p;
      break;
    default: SETERRQ(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
  }
  PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
  PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL,  NULL,  g3_uu));
  PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL,  g2_up, NULL));
  PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL,  NULL));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));
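  /* The preconditioning matrix is block diagonal: the viscous block g3_uu paired
     with the (1/mu)-scaled pressure mass matrix from g0_pp */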

  PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
  PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));

  PetscCall(DMGetLabel(dm, "marker", &label));
  PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (void (*)(void)) exactFuncs[0], NULL, user, NULL));

  /* Make constant values available to pointwise functions */
  {
    Parameter  *param;
    PetscScalar constants[1];

    PetscCall(PetscBagGetData(user->bag, (void **) &param));
    constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
    PetscCall(PetscDSSetConstants(ds, 1, constants));
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 0.0;
  return 0;
}
static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 1.0;
  return 0;
}

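/* With essential conditions imposed on the velocity over the whole boundary, the
   pressure is determined only up to a constant, so the Stokes operator has a
   one-dimensional null space of constant-pressure vectors, constructed here */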
static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
{
  Vec              vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void* ctx) = {zero, one};

  PetscFunctionBeginUser;
  PetscCheck(origField == 1, PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Field %D should be 1 for pressure", origField);
  funcs[field] = one;
  {
    PetscDS ds;
    PetscCall(DMGetDS(dm, &ds));
    PetscCall(PetscObjectViewFromOptions((PetscObject) ds, NULL, "-ds_view"));
  }
  PetscCall(DMCreateGlobalVector(dm, &vec));
  PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
  PetscCall(VecNormalize(vec, NULL));
  PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
  PetscCall(VecDestroy(&vec));
  /* New style for field null spaces: compose the null space directly with the
     pressure field so that solvers which extract subproblems can find it */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    PetscCall(DMGetField(dm, field, NULL, &pressure));
    PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
    PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres));
    PetscCall(MatNullSpaceDestroy(&nullspacePres));
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
{
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2];
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_",     "pres_"};

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  PetscCall(DMPlexIsSimplex(dm, &simplex));
  Nc[0] = dim;
  Nc[1] = 1;
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
    PetscCall(PetscObjectSetName((PetscObject) fe, name[f]));
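    /* Reuse the first (velocity) quadrature for every field so that all
       integrals are evaluated with a single rule */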
    if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
    PetscCall(PetscFESetQuadrature(fe, q));
    PetscCall(DMSetField(dm, f, NULL, (PetscObject) fe));
    PetscCall(PetscFEDestroy(&fe));
  }
  PetscCall(DMCreateDS(dm));
  PetscCall((*setupEqn)(dm, user));
  while (cdm) {
    PetscCall(DMCopyDisc(dm, cdm));
    PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
    PetscCall(DMGetCoarseDM(cdm, &cdm));
  }
  PetscFunctionReturn(0);
}

int main(int argc, char **argv)
{
  SNES           snes;
  DM             dm;
  Vec            u;
  AppCtx         user;

  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
  PetscCall(SNESCreate(PetscObjectComm((PetscObject) dm), &snes));
  PetscCall(SNESSetDM(snes, dm));
  PetscCall(DMSetApplicationContext(dm, &user));

  PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
  PetscCall(SetupProblem(dm, SetupEqn, &user));
  PetscCall(DMPlexCreateClosureIndex(dm, NULL));

  PetscCall(DMCreateGlobalVector(dm, &u));
  PetscCall(DMPlexSetSNESLocalFEM(dm, &user, &user, &user));
  PetscCall(SNESSetFromOptions(snes));
  PetscCall(DMSNESCheckFromOptions(snes, u));
  PetscCall(PetscObjectSetName((PetscObject) u, "Solution"));
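  /* Attach the constant-pressure null space to the assembled Jacobian so that
     the Krylov solvers can correctly handle the singular operator */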
  {
    Mat          J;
    MatNullSpace sp;

    PetscCall(SNESSetUp(snes));
    PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
    PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
    PetscCall(MatSetNullSpace(J, sp));
    PetscCall(MatNullSpaceDestroy(&sp));
    PetscCall(PetscObjectSetName((PetscObject) J, "Jacobian"));
    PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
  }
  PetscCall(SNESSolve(snes, NULL, u));

  PetscCall(VecDestroy(&u));
  PetscCall(SNESDestroy(&snes));
  PetscCall(DMDestroy(&dm));
  PetscCall(PetscBagDestroy(&user.bag));
  PetscCall(PetscFinalize());
  return 0;
}
/*TEST

  test:
    suffix: 2d_p2_p1_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: triangle
    args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check
    requires: ctetgen
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: ctetgen
    args: -sol quadratic -dm_refine 2 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_conv
    requires: triangle
    # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p2_p1_conv_gamg
    requires: triangle
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_p2_p1_conv
    requires: ctetgen !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_q2_q1_conv
    # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_q2_q1_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
    args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p3_p2_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 3d_p3_p2_check
    requires: ctetgen !single
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 2d_p3_p2_conv
    requires: triangle
    # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
    args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_p3_p2_conv
    requires: ctetgen long_runtime
    # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q1_p0_conv
    requires: !single
    # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_q1_p0_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd

  # Stokes preconditioners
  #   Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  #   Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_triangular
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
  #   Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_upper
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_lower
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_full
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  #   Full Schur + Velocity GMG
  test:
    suffix: 2d_p2_p1_gmg_vcycle
    requires: triangle
    args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
        -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
  #   SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_simple
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
        -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
        -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
  #   FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_fetidp
    requires: triangle mumps
    nsize: 5
    args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 2d_q2_q1_fetidp
    requires: mumps
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 3d_p2_p1_fetidp
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-9 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
        -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
        -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
        -fetidp_bddelta_pc_factor_mat_ordering_type external \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  test:
    suffix: 3d_q2_q1_fetidp
    requires: suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
        -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
        -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
        -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
        -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  #   BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_bddc
    nsize: 2
    requires: triangle !single
    args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
        -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
  #   Vanka
  test:
    suffix: 2d_q1_p0_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
        -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_q1_p0_vanka_denseinv
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
        -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  #   Vanka smoother
  test:
    suffix: 2d_q1_p0_gmg_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type mg \
        -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
        -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
          -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
        -mg_coarse_pc_type svd

TEST*/