xref: /petsc/src/snes/tutorials/ex62.c (revision 6524c165f7ddaf30fd7457737f668f984c8ababf)
1 static char help[] = "Stokes Problem discretized with finite elements,\n\
2 using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";
3 
4 /*
5 For the isoviscous Stokes problem, which we discretize using the finite
6 element method on an unstructured mesh, the weak form equations are
7 
8   < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0
9   < q, -\nabla\cdot u >                                                   = 0
10 
11 Viewing:
12 
13 To produce nice output, use
14 
15   -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append
16 
17 You can get a LaTeX view of the mesh, with point numbering using
18 
19   -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0
20 
21 The data layout can be viewed using
22 
23   -dm_petscsection_view
24 
25 Lots of information about the FEM assembly can be printed using
26 
27   -dm_plex_print_fem 3
28 */
29 
30 #include <petscdmplex.h>
31 #include <petscsnes.h>
32 #include <petscds.h>
33 #include <petscbag.h>
34 
35 // TODO: Plot residual by fields after each smoother iterate
36 
/* Manufactured (MMS) solution choices.
   SolTypes lists the selectable value names first; the trailing entries
   ("SolType", "SOL_", NULL) are the type-name/prefix/sentinel metadata that
   PETSc enum-option helpers expect, which is why option parsing uses
   PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3 selectable entries. */
typedef enum {
  SOL_QUADRATIC,
  SOL_TRIG,
  SOL_UNKNOWN
} SolType;
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", NULL};
43 
/* Physical parameters, stored in a PetscBag so they can be set from options
   (-mu) and viewed with -param_view. */
typedef struct {
  PetscScalar mu; /* dynamic shear viscosity, Pa s; exposed to pointwise functions as constants[0] */
} Parameter;
47 
/* Application context threaded through problem setup and callbacks. */
typedef struct {
  PetscBag bag; /* Problem parameters (owns a Parameter struct); destroyed in main() */
  SolType  sol; /* Which MMS solution to verify against, chosen with -sol */
} AppCtx;
52 
53 static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[]) {
54   const PetscReal mu = PetscRealPart(constants[0]);
55   const PetscInt  Nc = uOff[1] - uOff[0];
56   PetscInt        c, d;
57 
58   for (c = 0; c < Nc; ++c) {
59     for (d = 0; d < dim; ++d) f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]);
60     f1[c * dim + c] -= u[uOff[1]];
61   }
62 }
63 
64 static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) {
65   PetscInt d;
66   for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
67 }
68 
69 static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[]) {
70   PetscInt d;
71   for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
72 }
73 
74 static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[]) {
75   PetscInt d;
76   for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
77 }
78 
79 static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[]) {
80   const PetscReal mu = PetscRealPart(constants[0]);
81   const PetscInt  Nc = uOff[1] - uOff[0];
82   PetscInt        c, d;
83 
84   for (c = 0; c < Nc; ++c) {
85     for (d = 0; d < dim; ++d) {
86       g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
87       g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
88     }
89   }
90 }
91 
92 static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) {
93   const PetscReal mu = PetscRealPart(constants[0]);
94 
95   g0[0] = 1.0 / mu;
96 }
97 
98 /* Quadratic MMS Solution
99    2D:
100 
101      u = x^2 + y^2
102      v = 2 x^2 - 2xy
103      p = x + y - 1
104      f = <1 - 4 mu, 1 - 4 mu>
105 
106    so that
107 
108      e(u) = (grad u + grad u^T) = / 4x  4x \
109                                   \ 4x -4x /
110      div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
111      \nabla \cdot u             = 2x - 2x = 0
112 
113    3D:
114 
115      u = 2 x^2 + y^2 + z^2
116      v = 2 x^2 - 2xy
117      w = 2 x^2 - 2xz
118      p = x + y + z - 3/2
119      f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>
120 
121    so that
122 
123      e(u) = (grad u + grad u^T) = / 8x  4x  4x \
124                                   | 4x -4x  0  |
125                                   \ 4x  0  -4x /
126      div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
127      \nabla \cdot u             = 4x - 2x - 2x = 0
128 */
129 static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
130   PetscInt c;
131 
132   u[0] = (dim - 1) * PetscSqr(x[0]);
133   for (c = 1; c < Nc; ++c) {
134     u[0] += PetscSqr(x[c]);
135     u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
136   }
137   return 0;
138 }
139 
140 static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
141   PetscInt d;
142 
143   u[0] = -0.5 * dim;
144   for (d = 0; d < dim; ++d) u[0] += x[d];
145   return 0;
146 }
147 
148 static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) {
149   const PetscReal mu = PetscRealPart(constants[0]);
150   PetscInt        d;
151 
152   f0[0] = (dim - 1) * 4.0 * mu - 1.0;
153   for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
154 }
155 
156 /* Trigonometric MMS Solution
157    2D:
158 
159      u = sin(pi x) + sin(pi y)
160      v = -pi cos(pi x) y
161      p = sin(2 pi x) + sin(2 pi y)
162      f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>
163 
164    so that
165 
166      e(u) = (grad u + grad u^T) = /        2pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y \
167                                   \ pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)          /
168      div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
169      \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0
170 
171    3D:
172 
173      u = 2 sin(pi x) + sin(pi y) + sin(pi z)
174      v = -pi cos(pi x) y
175      w = -pi cos(pi x) z
176      p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
177      f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>
178 
179    so that
180 
181      e(u) = (grad u + grad u^T) = /        4pi cos(pi x)             pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
182                                   | pi cos(pi y) + pi^2 sin(pi x) y          -2pi cos(pi x)                        0                  |
183                                   \ pi cos(pi z) + pi^2 sin(pi x) z               0                         -2pi cos(pi x)            /
184      div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
185      \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
186 */
187 static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
188   PetscInt c;
189 
190   u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
191   for (c = 1; c < Nc; ++c) {
192     u[0] += PetscSinReal(PETSC_PI * x[c]);
193     u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
194   }
195   return 0;
196 }
197 
198 static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
199   PetscInt d;
200 
201   for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
202   return 0;
203 }
204 
205 static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) {
206   const PetscReal mu = PetscRealPart(constants[0]);
207   PetscInt        d;
208 
209   f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
210   for (d = 1; d < dim; ++d) {
211     f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
212     f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
213   }
214 }
215 
/* Parse command-line options into the application context.

   Only -sol (the MMS solution) is handled here; mesh and discretization
   options are consumed later by DMSetFromOptions()/PetscFECreateDefault(). */
static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options) {
  PetscInt sol;

  PetscFunctionBeginUser;
  options->sol = SOL_QUADRATIC; /* default solution type */
  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  sol = options->sol;
  /* The last 3 entries of SolTypes are PetscEnum metadata (type name, prefix, sentinel), not selectable values */
  PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
  options->sol = (SolType)sol;
  PetscOptionsEnd();
  PetscFunctionReturn(0);
}
228 
/* Create the DMPLEX mesh entirely from command-line options
   (-dm_plex_dim, -dm_plex_simplex, -dm_refine, ...) and view it with -dm_view.
   The caller owns *dm and must destroy it. */
static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm) {
  PetscFunctionBeginUser;
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  PetscFunctionReturn(0);
}
237 
/* Create and populate the parameter bag (ctx->bag) holding the physical
   parameters, set them from options (-mu), and optionally print them with
   -param_view.  The bag is destroyed in main(). */
static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx) {
  Parameter *p;

  PetscFunctionBeginUser;
  /* setup PETSc parameter bag */
  PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
  PetscCall(PetscBagGetData(ctx->bag, (void **)&p));
  PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
  PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
  PetscCall(PetscBagSetFromOptions(ctx->bag));
  {
    PetscViewer       viewer;
    PetscViewerFormat format;
    PetscBool         flg;

    /* -param_view takes a viewer spec (e.g. ascii:params.txt); we own the returned viewer */
    PetscCall(PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
    if (flg) {
      PetscCall(PetscViewerPushFormat(viewer, format));
      PetscCall(PetscBagView(ctx->bag, viewer));
      PetscCall(PetscViewerFlush(viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }
  PetscFunctionReturn(0);
}
264 
/* Wire the Stokes weak form into the PetscDS of dm.

   Fields: 0 = velocity (vector), 1 = pressure (scalar).  Sets, per the chosen
   MMS solution (user->sol):
     - residual pointwise functions (f0/f1) for both fields,
     - Jacobian blocks g3_uu, g2_up, g1_pu (the (1,1) block is zero),
     - a preconditioner Jacobian that replaces the zero (1,1) block with the
       1/mu pressure mass matrix (g0_pp),
     - exact solutions used by -dmsnes_check and convergence estimation,
     - an essential ("wall") boundary condition on marker value 1 using the
       exact velocity,
     - the constants array {mu} made available to all pointwise functions. */
static PetscErrorCode SetupEqn(DM dm, AppCtx *user) {
  PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
  PetscDS        ds;
  DMLabel        label;
  const PetscInt id = 1; /* "marker" label value for the wall boundary */

  PetscFunctionBeginUser;
  PetscCall(DMGetDS(dm, &ds));
  switch (user->sol) {
  case SOL_QUADRATIC:
    PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
    exactFuncs[0] = quadratic_u;
    exactFuncs[1] = quadratic_p;
    break;
  case SOL_TRIG:
    PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
    exactFuncs[0] = trig_u;
    exactFuncs[1] = trig_p;
    break;
  default: SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
  }
  PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
  PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
  PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
  PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));

  PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
  PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));

  PetscCall(DMGetLabel(dm, "marker", &label));
  PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (void (*)(void))exactFuncs[0], NULL, user, NULL));

  /* Make constant values available to pointwise functions */
  {
    Parameter  *param;
    PetscScalar constants[1];

    PetscCall(PetscBagGetData(user->bag, (void **)&param));
    constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
    PetscCall(PetscDSSetConstants(ds, 1, constants));
  }
  PetscFunctionReturn(0);
}
310 
311 static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
312   PetscInt c;
313   for (c = 0; c < Nc; ++c) u[c] = 0.0;
314   return 0;
315 }
316 static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) {
317   PetscInt c;
318   for (c = 0; c < Nc; ++c) u[c] = 1.0;
319   return 0;
320 }
321 
/* Build the constant-pressure null space of the Stokes operator.

   origField: field number in the original problem; must be 1 (pressure)
   field:     field number in dm — may differ from origField when dm is a
              sub-DM produced by field splitting; presumably 0 in that case
              (TODO confirm against DMCreateSubDM callers)
   nullspace: newly created null space spanned by the (0, ..., 0, 1) projection;
              the caller destroys it

   Also composes a constant null space with the pressure field object so
   fieldsplit sub-solvers can discover it ("new style" field null spaces). */
static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace) {
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) = {zero, one};

  PetscFunctionBeginUser;
  PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
  funcs[field] = one; /* place the constant in whichever slot pressure occupies in this dm */
  {
    PetscDS ds;
    PetscCall(DMGetDS(dm, &ds));
    PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
  }
  PetscCall(DMCreateGlobalVector(dm, &vec));
  PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
  PetscCall(VecNormalize(vec, NULL));
  /* PETSC_FALSE: the constant vector alone is the basis, not "has constants" */
  PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
  PetscCall(VecDestroy(&vec)); /* nullspace holds its own reference */
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    PetscCall(DMGetField(dm, field, NULL, &pressure));
    PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
    PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
    PetscCall(MatNullSpaceDestroy(&nullspacePres)); /* composition keeps a reference */
  }
  PetscFunctionReturn(0);
}
351 
/* Create the two finite-element fields (velocity, pressure), attach the
   discrete system via setupEqn, and propagate the discretization plus the
   pressure null-space constructor to every coarse DM (for geometric multigrid).

   Element degrees come from options via the "vel_"/"pres_" prefixes; all
   fields share the quadrature of the first so mixed integrals are consistent. */
static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user) {
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2];
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_", "pres_"};

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  PetscCall(DMPlexIsSimplex(dm, &simplex));
  Nc[0] = dim; /* velocity is a vector field */
  Nc[1] = 1;   /* pressure is scalar */
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
    PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
    if (!q) PetscCall(PetscFEGetQuadrature(fe, &q)); /* remember the first field's quadrature */
    PetscCall(PetscFESetQuadrature(fe, q));          /* and force every field to use it */
    PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
    PetscCall(PetscFEDestroy(&fe)); /* DM holds its own reference */
  }
  PetscCall(DMCreateDS(dm));
  PetscCall((*setupEqn)(dm, user));
  /* Copy the discretization down the coarse-DM hierarchy (empty unless -dm_refine_hierarchy) */
  while (cdm) {
    PetscCall(DMCopyDisc(dm, cdm));
    PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
    PetscCall(DMGetCoarseDM(cdm, &cdm));
  }
  PetscFunctionReturn(0);
}
384 
/* Solve the isoviscous Stokes MMS problem:
   options -> mesh -> parameters -> discretization -> SNES solve,
   attaching the constant-pressure null space to the Jacobian first. */
int main(int argc, char **argv) {
  SNES   snes;
  DM     dm;
  Vec    u;
  AppCtx user;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
  PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
  PetscCall(SNESSetDM(snes, dm));
  PetscCall(DMSetApplicationContext(dm, &user));

  PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
  PetscCall(SetupProblem(dm, SetupEqn, &user));
  PetscCall(DMPlexCreateClosureIndex(dm, NULL)); /* speeds up repeated closure traversals in assembly */

  PetscCall(DMCreateGlobalVector(dm, &u));
  PetscCall(DMPlexSetSNESLocalFEM(dm, &user, &user, &user)); /* use DS pointwise functions for residual/Jacobian */
  PetscCall(SNESSetFromOptions(snes));
  PetscCall(DMSNESCheckFromOptions(snes, u)); /* -dmsnes_check: verify residual/Jacobian against exact solution */
  PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
  {
    Mat          J;
    MatNullSpace sp;

    /* Set up first so the Jacobian matrix exists, then attach the
       constant-pressure null space before solving */
    PetscCall(SNESSetUp(snes));
    PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
    PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
    PetscCall(MatSetNullSpace(J, sp));
    PetscCall(MatNullSpaceDestroy(&sp)); /* J keeps its own reference */
    PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
    PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
  }
  PetscCall(SNESSolve(snes, NULL, u));

  PetscCall(VecDestroy(&u));
  PetscCall(SNESDestroy(&snes));
  PetscCall(DMDestroy(&dm));
  PetscCall(PetscBagDestroy(&user.bag));
  PetscCall(PetscFinalize());
  return 0;
}
429 /*TEST
430 
431   test:
432     suffix: 2d_p2_p1_check
433     requires: triangle
434     args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
435 
436   test:
437     suffix: 2d_p2_p1_check_parallel
438     nsize: {{2 3 5}}
439     requires: triangle
440     args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
441 
442   test:
443     suffix: 3d_p2_p1_check
444     requires: ctetgen
445     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
446 
447   test:
448     suffix: 3d_p2_p1_check_parallel
449     nsize: {{2 3 5}}
450     requires: ctetgen
451     args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
452 
453   test:
454     suffix: 2d_p2_p1_conv
455     requires: triangle
456     # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
457     args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
458       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
459       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
460         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
461 
462   test:
463     suffix: 2d_p2_p1_conv_gamg
464     requires: triangle
465     args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2  \
466       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
467         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
468 
469   test:
470     suffix: 3d_p2_p1_conv
471     requires: ctetgen !single
472     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
473     args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
474       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
475       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
476         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
477 
478   test:
479     suffix: 2d_q2_q1_check
480     args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
481 
482   test:
483     suffix: 3d_q2_q1_check
484     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
485 
486   test:
487     suffix: 2d_q2_q1_conv
488     # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
489     args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
490       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
491       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
492         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
493 
494   test:
495     suffix: 3d_q2_q1_conv
496     requires: !single
497     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
498     args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
499       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
500       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
501         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
502 
503   test:
504     suffix: 2d_p3_p2_check
505     requires: triangle
506     args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
507 
508   test:
509     suffix: 3d_p3_p2_check
510     requires: ctetgen !single
511     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001
512 
513   test:
514     suffix: 2d_p3_p2_conv
515     requires: triangle
516     # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
517     args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
518       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
519       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
520         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
521 
522   test:
523     suffix: 3d_p3_p2_conv
524     requires: ctetgen long_runtime
525     # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
526     args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
527       -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
528       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
529         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu
530 
531   test:
532     suffix: 2d_q1_p0_conv
533     requires: !single
534     # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
535     args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
536       -ksp_atol 1e-10 -petscds_jac_pre 0 \
537       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
538         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
539 
540   test:
541     suffix: 3d_q1_p0_conv
542     requires: !single
543     # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
544     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
545       -ksp_atol 1e-10 -petscds_jac_pre 0 \
546       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
547         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0
548 
549   # Stokes preconditioners
550   #   Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
551   test:
552     suffix: 2d_p2_p1_block_diagonal
553     requires: triangle
554     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
555       -snes_error_if_not_converged \
556       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
557       -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
558   #   Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
559   test:
560     suffix: 2d_p2_p1_block_triangular
561     requires: triangle
562     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
563       -snes_error_if_not_converged \
564       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
565       -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
566   #   Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
567   test:
568     suffix: 2d_p2_p1_schur_diagonal
569     requires: triangle
570     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
571       -snes_error_if_not_converged \
572       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
573       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
574         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
575   #   Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
576   test:
577     suffix: 2d_p2_p1_schur_upper
578     requires: triangle
579     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
580       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
581       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
582         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
583   #   Lower triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
584   test:
585     suffix: 2d_p2_p1_schur_lower
586     requires: triangle
587     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
588       -snes_error_if_not_converged \
589       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
590       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
591         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
592   #   Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
593   test:
594     suffix: 2d_p2_p1_schur_full
595     requires: triangle
596     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
597       -snes_error_if_not_converged \
598       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
599       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
600         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
601   #   Full Schur + Velocity GMG
602   test:
603     suffix: 2d_p2_p1_gmg_vcycle
604     requires: triangle
605     args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
606       -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
607       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
608         -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
609   #   SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
610   test:
611     suffix: 2d_p2_p1_simple
612     requires: triangle
613     args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
614       -snes_error_if_not_converged \
615       -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
616       -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
617         -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
618         -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
619   #   FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
620   test:
621     suffix: 2d_p2_p1_fetidp
622     requires: triangle mumps
623     nsize: 5
624     args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
625       -snes_error_if_not_converged \
626       -ksp_type fetidp -ksp_rtol 1.0e-8 \
627       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
628         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
629         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
630   test:
631     suffix: 2d_q2_q1_fetidp
632     requires: mumps
633     nsize: 5
634     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
635       -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
636       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
637         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
638         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
639   test:
640     suffix: 3d_p2_p1_fetidp
641     requires: ctetgen mumps suitesparse
642     nsize: 5
643     args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
644       -snes_error_if_not_converged \
645       -ksp_type fetidp -ksp_rtol 1.0e-9  \
646       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
647         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
648         -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
649         -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
650         -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
651         -fetidp_bddelta_pc_factor_mat_ordering_type external \
652         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
653         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
654   test:
655     suffix: 3d_q2_q1_fetidp
656     requires: suitesparse
657     nsize: 5
658     args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
659       -snes_error_if_not_converged \
660       -ksp_type fetidp -ksp_rtol 1.0e-8 \
661       -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
662         -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
663         -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
664         -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
665         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
666         -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
667   #   BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
668   test:
669     suffix: 2d_p2_p1_bddc
670     nsize: 2
671     requires: triangle !single
672     args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
673       -snes_error_if_not_converged \
674       -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
675         -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
  #   Vanka patch solver (PCPATCH with Vanka-type patch construction)
677   test:
678     suffix: 2d_q1_p0_vanka
679     requires: double !complex
680     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
681       -snes_rtol 1.0e-4 \
682       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
683       -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
684         -sub_ksp_type preonly -sub_pc_type lu
685   test:
686     suffix: 2d_q1_p0_vanka_denseinv
687     requires: double !complex
688     args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
689       -snes_rtol 1.0e-4 \
690       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
691       -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
692         -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  #   Multigrid with a Vanka patch smoother on each level
694   test:
695     suffix: 2d_q1_p0_gmg_vanka
696     requires: double !complex
697     args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
698       -snes_rtol 1.0e-4 \
699       -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
700       -pc_type mg \
701         -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
702         -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
703           -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
704         -mg_coarse_pc_type svd
705 
706 TEST*/
707