xref: /petsc/src/snes/tutorials/ex62.c (revision 77d968b72e8e27b79bcc994c018975de390644ed)
1 static char help[] = "Stokes Problem discretized with finite elements,\n\
2 using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";
3 
4 /*
5 For the isoviscous Stokes problem, which we discretize using the finite
6 element method on an unstructured mesh, the weak form equations are
7 
8   < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0
9   < q, -\nabla\cdot u >                                                   = 0
10 
11 Viewing:
12 
13 To produce nice output, use
14 
15   -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append
16 
17 You can get a LaTeX view of the mesh, with point numbering using
18 
19   -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0
20 
21 The data layout can be viewed using
22 
23   -dm_petscsection_view
24 
25 Lots of information about the FEM assembly can be printed using
26 
27   -dm_plex_print_fem 3
28 */
29 
30 #include <petscdmplex.h>
31 #include <petscsnes.h>
32 #include <petscds.h>
33 #include <petscbag.h>
34 
35 // TODO: Plot residual by fields after each smoother iterate
36 
/* Manufactured (MMS) solution families selectable with -sol */
typedef enum {
  SOL_QUADRATIC, /* polynomial exact solution */
  SOL_TRIG,      /* trigonometric exact solution */
  SOL_UNKNOWN
} SolType;
/* Option names for the enum; the trailing "SolType", "SOL_", 0 entries are the
   type name, value prefix, and terminator expected by PETSc enum-option helpers,
   hence the "- 3" when this array is passed to PetscOptionsEList() */
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};

/* Physical parameters stored in a PetscBag so they are options-settable */
typedef struct
{
  PetscScalar mu; /* dynamic shear viscosity */
} Parameter;

/* Application context threaded through the solver callbacks */
typedef struct
{
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
} AppCtx;
54 
55 static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
56 {
57   const PetscReal mu = PetscRealPart(constants[0]);
58   const PetscInt  Nc = uOff[1] - uOff[0];
59   PetscInt        c, d;
60 
61   for (c = 0; c < Nc; ++c) {
62     for (d = 0; d < dim; ++d) { f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]); }
63     f1[c * dim + c] -= u[uOff[1]];
64   }
65 }
66 
67 static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
68 {
69   PetscInt d;
70   for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
71 }
72 
73 static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
74 {
75   PetscInt d;
76   for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
77 }
78 
79 static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
80 {
81   PetscInt d;
82   for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
83 }
84 
85 static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
86 {
87   const PetscReal mu = PetscRealPart(constants[0]);
88   const PetscInt  Nc = uOff[1] - uOff[0];
89   PetscInt        c, d;
90 
91   for (c = 0; c < Nc; ++c) {
92     for (d = 0; d < dim; ++d) {
93       g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
94       g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
95     }
96   }
97 }
98 
99 static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
100 {
101   const PetscReal mu = PetscRealPart(constants[0]);
102 
103   g0[0] = 1.0 / mu;
104 }
105 
106 /* Quadratic MMS Solution
107    2D:
108 
109      u = x^2 + y^2
110      v = 2 x^2 - 2xy
111      p = x + y - 1
112      f = <1 - 4 mu, 1 - 4 mu>
113 
114    so that
115 
116      e(u) = (grad u + grad u^T) = / 4x  4x \
117                                   \ 4x -4x /
118      div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
119      \nabla \cdot u             = 2x - 2x = 0
120 
121    3D:
122 
123      u = 2 x^2 + y^2 + z^2
124      v = 2 x^2 - 2xy
125      w = 2 x^2 - 2xz
126      p = x + y + z - 3/2
127      f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>
128 
129    so that
130 
131      e(u) = (grad u + grad u^T) = / 8x  4x  4x \
132                                   | 4x -4x  0  |
133                                   \ 4x  0  -4x /
134      div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
135      \nabla \cdot u             = 4x - 2x - 2x = 0
136 */
137 static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
138 {
139   PetscInt c;
140 
141   u[0] = (dim - 1) * PetscSqr(x[0]);
142   for (c = 1; c < Nc; ++c) {
143     u[0] += PetscSqr(x[c]);
144     u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
145   }
146   return 0;
147 }
148 
149 static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
150 {
151   PetscInt d;
152 
153   u[0] = -0.5 * dim;
154   for (d = 0; d < dim; ++d) u[0] += x[d];
155   return 0;
156 }
157 
158 static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
159 {
160   const PetscReal mu = PetscRealPart(constants[0]);
161   PetscInt        d;
162 
163   f0[0] = (dim - 1) * 4.0 * mu - 1.0;
164   for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
165 }
166 
167 /* Trigonometric MMS Solution
168    2D:
169 
170      u = sin(pi x) + sin(pi y)
171      v = -pi cos(pi x) y
172      p = sin(2 pi x) + sin(2 pi y)
173      f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>
174 
175    so that
176 
177      e(u) = (grad u + grad u^T) = /        2pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y \
178                                   \ pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)          /
179      div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
180      \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0
181 
182    3D:
183 
184      u = 2 sin(pi x) + sin(pi y) + sin(pi z)
185      v = -pi cos(pi x) y
186      w = -pi cos(pi x) z
187      p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
188      f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>
189 
190    so that
191 
192      e(u) = (grad u + grad u^T) = /        4pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
193                                   | pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)                        0                  |
194                                   \ pi cos(pi z) + pi^2 sin(pi x) z               0                         -2pi cos(pi x)            /
195      div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
196      \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
197 */
198 static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
199 {
200   PetscInt c;
201 
202   u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
203   for (c = 1; c < Nc; ++c) {
204     u[0] += PetscSinReal(PETSC_PI * x[c]);
205     u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
206   }
207   return 0;
208 }
209 
210 static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
211 {
212   PetscInt d;
213 
214   for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
215   return 0;
216 }
217 
218 static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
219 {
220   const PetscReal mu = PetscRealPart(constants[0]);
221   PetscInt        d;
222 
223   f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
224   for (d = 1; d < dim; ++d) {
225     f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
226     f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
227   }
228 }
229 
230 static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
231 {
232   PetscInt sol;
233 
234   PetscFunctionBeginUser;
235   options->sol = SOL_QUADRATIC;
236   PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
237   sol = options->sol;
238   PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
239   options->sol = (SolType)sol;
240   PetscOptionsEnd();
241   PetscFunctionReturn(0);
242 }
243 
244 static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
245 {
246   PetscFunctionBeginUser;
247   PetscCall(DMCreate(comm, dm));
248   PetscCall(DMSetType(*dm, DMPLEX));
249   PetscCall(DMSetFromOptions(*dm));
250   PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
251   PetscFunctionReturn(0);
252 }
253 
254 static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
255 {
256   Parameter *p;
257 
258   PetscFunctionBeginUser;
259   /* setup PETSc parameter bag */
260   PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
261   PetscCall(PetscBagGetData(ctx->bag, (void **)&p));
262   PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
263   PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
264   PetscCall(PetscBagSetFromOptions(ctx->bag));
265   {
266     PetscViewer       viewer;
267     PetscViewerFormat format;
268     PetscBool         flg;
269 
270     PetscCall(PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
271     if (flg) {
272       PetscCall(PetscViewerPushFormat(viewer, format));
273       PetscCall(PetscBagView(ctx->bag, viewer));
274       PetscCall(PetscViewerFlush(viewer));
275       PetscCall(PetscViewerPopFormat(viewer));
276       PetscCall(PetscViewerDestroy(&viewer));
277     }
278   }
279   PetscFunctionReturn(0);
280 }
281 
282 static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
283 {
284   PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
285   PetscDS        ds;
286   DMLabel        label;
287   const PetscInt id = 1;
288 
289   PetscFunctionBeginUser;
290   PetscCall(DMGetDS(dm, &ds));
291   switch (user->sol) {
292   case SOL_QUADRATIC:
293     PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
294     exactFuncs[0] = quadratic_u;
295     exactFuncs[1] = quadratic_p;
296     break;
297   case SOL_TRIG:
298     PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
299     exactFuncs[0] = trig_u;
300     exactFuncs[1] = trig_p;
301     break;
302   default: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
303   }
304   PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
305   PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
306   PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
307   PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
308   PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
309   PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));
310 
311   PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
312   PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));
313 
314   PetscCall(DMGetLabel(dm, "marker", &label));
315   PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (void (*)(void))exactFuncs[0], NULL, user, NULL));
316 
317   /* Make constant values available to pointwise functions */
318   {
319     Parameter  *param;
320     PetscScalar constants[1];
321 
322     PetscCall(PetscBagGetData(user->bag, (void **)&param));
323     constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
324     PetscCall(PetscDSSetConstants(ds, 1, constants));
325   }
326   PetscFunctionReturn(0);
327 }
328 
329 static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
330 {
331   PetscInt c;
332   for (c = 0; c < Nc; ++c) u[c] = 0.0;
333   return 0;
334 }
335 static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx)
336 {
337   PetscInt c;
338   for (c = 0; c < Nc; ++c) u[c] = 1.0;
339   return 0;
340 }
341 
342 static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
343 {
344   Vec vec;
345   PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero, one};
346 
347   PetscFunctionBeginUser;
348   PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
349   funcs[field] = one;
350   {
351     PetscDS ds;
352     PetscCall(DMGetDS(dm, &ds));
353     PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
354   }
355   PetscCall(DMCreateGlobalVector(dm, &vec));
356   PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
357   PetscCall(VecNormalize(vec, NULL));
358   PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
359   PetscCall(VecDestroy(&vec));
360   /* New style for field null spaces */
361   {
362     PetscObject  pressure;
363     MatNullSpace nullspacePres;
364 
365     PetscCall(DMGetField(dm, field, NULL, &pressure));
366     PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
367     PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
368     PetscCall(MatNullSpaceDestroy(&nullspacePres));
369   }
370   PetscFunctionReturn(0);
371 }
372 
373 static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
374 {
375   DM              cdm = dm;
376   PetscQuadrature q   = NULL;
377   PetscBool       simplex;
378   PetscInt        dim, Nf = 2, f, Nc[2];
379   const char     *name[2]   = {"velocity", "pressure"};
380   const char     *prefix[2] = {"vel_", "pres_"};
381 
382   PetscFunctionBegin;
383   PetscCall(DMGetDimension(dm, &dim));
384   PetscCall(DMPlexIsSimplex(dm, &simplex));
385   Nc[0] = dim;
386   Nc[1] = 1;
387   for (f = 0; f < Nf; ++f) {
388     PetscFE fe;
389 
390     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
391     PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
392     if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
393     PetscCall(PetscFESetQuadrature(fe, q));
394     PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
395     PetscCall(PetscFEDestroy(&fe));
396   }
397   PetscCall(DMCreateDS(dm));
398   PetscCall((*setupEqn)(dm, user));
399   while (cdm) {
400     PetscCall(DMCopyDisc(dm, cdm));
401     PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
402     PetscCall(DMGetCoarseDM(cdm, &cdm));
403   }
404   PetscFunctionReturn(0);
405 }
406 
407 int main(int argc, char **argv)
408 {
409   SNES   snes;
410   DM     dm;
411   Vec    u;
412   AppCtx user;
413 
414   PetscFunctionBeginUser;
415   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
416   PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
417   PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
418   PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
419   PetscCall(SNESSetDM(snes, dm));
420   PetscCall(DMSetApplicationContext(dm, &user));
421 
422   PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
423   PetscCall(SetupProblem(dm, SetupEqn, &user));
424   PetscCall(DMPlexCreateClosureIndex(dm, NULL));
425 
426   PetscCall(DMCreateGlobalVector(dm, &u));
427   PetscCall(DMPlexSetSNESLocalFEM(dm, &user, &user, &user));
428   PetscCall(SNESSetFromOptions(snes));
429   PetscCall(DMSNESCheckFromOptions(snes, u));
430   PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
431   {
432     Mat          J;
433     MatNullSpace sp;
434 
435     PetscCall(SNESSetUp(snes));
436     PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
437     PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
438     PetscCall(MatSetNullSpace(J, sp));
439     PetscCall(MatNullSpaceDestroy(&sp));
440     PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
441     PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
442   }
443   PetscCall(SNESSolve(snes, NULL, u));
444 
445   PetscCall(VecDestroy(&u));
446   PetscCall(SNESDestroy(&snes));
447   PetscCall(DMDestroy(&dm));
448   PetscCall(PetscBagDestroy(&user.bag));
449   PetscCall(PetscFinalize());
450   return 0;
451 }
452 /*TEST
453 
454   test:
455     suffix: 2d_p2_p1_check
456     requires: triangle
457     args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
458 
459   test:
460     suffix: 2d_p2_p1_check_parallel
461     nsize: {{2 3 5}}
462     requires: triangle
463     args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
464 
465   test:
466     suffix: 3d_p2_p1_check
467     requires: ctetgen
468     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
469 
470   test:
471     suffix: 3d_p2_p1_check_parallel
472     nsize: {{2 3 5}}
473     requires: ctetgen
474     args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
475 
476   test:
477     suffix: 2d_p2_p1_conv
478     requires: triangle
479     # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
480     args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
481       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
482       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
483         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
484 
485   test:
486     suffix: 2d_p2_p1_conv_gamg
487     requires: triangle
488     args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2  \
489       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
490         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
491 
492   test:
493     suffix: 3d_p2_p1_conv
494     requires: ctetgen !single
495     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
496     args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
497       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
498       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
499         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
500 
501   test:
502     suffix: 2d_q2_q1_check
503     args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
504 
505   test:
506     suffix: 3d_q2_q1_check
507     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
508 
509   test:
510     suffix: 2d_q2_q1_conv
511     # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
512     args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
513       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
514       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
515         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
516 
517   test:
518     suffix: 3d_q2_q1_conv
519     requires: !single
520     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
521     args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
522       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
523       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
524         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
525 
526   test:
527     suffix: 2d_p3_p2_check
528     requires: triangle
529     args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
530 
531   test:
532     suffix: 3d_p3_p2_check
533     requires: ctetgen !single
534     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
535 
536   test:
537     suffix: 2d_p3_p2_conv
538     requires: triangle
539     # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
540     args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
541       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
542       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
543         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
544 
545   test:
546     suffix: 3d_p3_p2_conv
547     requires: ctetgen long_runtime
548     # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
549     args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
550       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
551       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
552         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
553 
554   test:
555     suffix: 2d_q1_p0_conv
556     requires: !single
557     # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
558     args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
559       -ksp_atol 1e-10 -petscds_jac_pre 0 \
560       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
561         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
562 
563   test:
564     suffix: 3d_q1_p0_conv
565     requires: !single
566     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
567     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
568       -ksp_atol 1e-10 -petscds_jac_pre 0 \
569       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
570         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
571 
572   # Stokes preconditioners
573   #   Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
574   test:
575     suffix: 2d_p2_p1_block_diagonal
576     requires: triangle
577     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
578       -snes_error_if_not_converged \
579       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
580       -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
581   #   Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
582   test:
583     suffix: 2d_p2_p1_block_triangular
584     requires: triangle
585     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
586       -snes_error_if_not_converged \
587       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
588       -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
589   #   Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
590   test:
591     suffix: 2d_p2_p1_schur_diagonal
592     requires: triangle
593     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
594       -snes_error_if_not_converged \
595       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
596       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
597         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
598   #   Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
599   test:
600     suffix: 2d_p2_p1_schur_upper
601     requires: triangle
602     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
603       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
604       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
605         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
606   #   Lower triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
607   test:
608     suffix: 2d_p2_p1_schur_lower
609     requires: triangle
610     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
611       -snes_error_if_not_converged \
612       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
613       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
614         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
615   #   Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
616   test:
617     suffix: 2d_p2_p1_schur_full
618     requires: triangle
619     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
620       -snes_error_if_not_converged \
621       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
622       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
623         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
624   #   Full Schur + Velocity GMG
625   test:
626     suffix: 2d_p2_p1_gmg_vcycle
627     requires: triangle
628     args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
629       -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
630       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
631         -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
632   #   SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
633   test:
634     suffix: 2d_p2_p1_simple
635     requires: triangle
636     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
637       -snes_error_if_not_converged \
638       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
639       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
640         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
641         -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
642   #   FETI-DP solvers (these solvers are quite inefficient; they are here to exercise the code)
643   test:
644     suffix: 2d_p2_p1_fetidp
645     requires: triangle mumps
646     nsize: 5
647     args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
648       -snes_error_if_not_converged \
649       -ksp_type fetidp -ksp_rtol 1.0e-8 \
650       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
651         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
652         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
653   test:
654     suffix: 2d_q2_q1_fetidp
655     requires: mumps
656     nsize: 5
657     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
658       -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
659       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
660         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
661         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
662   test:
663     suffix: 3d_p2_p1_fetidp
664     requires: ctetgen mumps suitesparse
665     nsize: 5
666     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
667       -snes_error_if_not_converged \
668       -ksp_type fetidp -ksp_rtol 1.0e-9  \
669       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
670         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
671         -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
672         -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
673         -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
674         -fetidp_bddelta_pc_factor_mat_ordering_type external \
675         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
676         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
677   test:
678     suffix: 3d_q2_q1_fetidp
679     requires: suitesparse
680     nsize: 5
681     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
682       -snes_error_if_not_converged \
683       -ksp_type fetidp -ksp_rtol 1.0e-8 \
684       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
685         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
686         -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
687         -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
688         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
689         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
690   #   BDDC solvers (these solvers are quite inefficient; they are here to exercise the code)
691   test:
692     suffix: 2d_p2_p1_bddc
693     nsize: 2
694     requires: triangle !single
695     args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
696       -snes_error_if_not_converged \
697       -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
698         -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
699   #   Vanka
700   test:
701     suffix: 2d_q1_p0_vanka
702     requires: double !complex
703     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
704       -snes_rtol 1.0e-4 \
705       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
706       -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
707         -sub_ksp_type preonly -sub_pc_type lu
708   test:
709     suffix: 2d_q1_p0_vanka_denseinv
710     requires: double !complex
711     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
712       -snes_rtol 1.0e-4 \
713       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
714       -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
715         -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
716   #   Vanka as a multigrid smoother
717   test:
718     suffix: 2d_q1_p0_gmg_vanka
719     requires: double !complex
720     args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
721       -snes_rtol 1.0e-4 \
722       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
723       -pc_type mg \
724         -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
725         -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
726           -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
727         -mg_coarse_pc_type svd
728 
729 TEST*/
730