static char help[] = "Stokes Problem discretized with finite elements,\n\
using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n";

/*
For the isoviscous Stokes problem, which we discretize using the finite
element method on an unstructured mesh, the weak form equations are

  < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0
  < q, -\nabla\cdot u >                                                   = 0

Viewing:

To produce nice output, use

  -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append

You can get a LaTeX view of the mesh, with point numbering, using

  -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0

The data layout can be viewed using

  -dm_petscsection_view

Lots of information about the FEM assembly can be printed using

  -dm_plex_print_fem 3
*/
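
/*
  A typical sequential run, using options taken from the first test at the bottom of this
  file (the executable name ex62 is an assumption; substitute the name of your build):

    ./ex62 -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001
*/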

#include <petscdmplex.h>
#include <petscsnes.h>
#include <petscds.h>
#include <petscbag.h>

// TODO: Plot residual by fields after each smoother iterate

typedef enum {
  SOL_QUADRATIC,
  SOL_TRIG,
  SOL_UNKNOWN
} SolType;
const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0};
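/* The trailing "SolType", "SOL_", 0 entries follow the PETSc enum string-list convention; ProcessOptions() below passes only the leading solution names (array length minus 3) to PetscOptionsEList() */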

typedef struct {
  PetscScalar mu; /* dynamic shear viscosity */
} Parameter;

typedef struct {
  PetscBag bag; /* Problem parameters */
  SolType  sol; /* MMS solution */
} AppCtx;

static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1] - uOff[0];
  PetscInt        c, d;

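  /* f1 is the flux term of the momentum residual: the deviatoric stress mu (\nabla u + {\nabla u}^T) with the pressure subtracted on the diagonal */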
  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) f1[c * dim + d] = mu * (u_x[c * dim + d] + u_x[d * dim + c]);
    f1[c * dim + c] -= u[uOff[1]];
  }
}

static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  PetscInt d;
  for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d * dim + d];
}

static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g1[d * dim + d] = -1.0; /* < q, -\nabla\cdot u > */
}

static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[])
{
  PetscInt d;
  for (d = 0; d < dim; ++d) g2[d * dim + d] = -1.0; /* -< \nabla\cdot v, p > */
}

static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  const PetscInt  Nc = uOff[1] - uOff[0];
  PetscInt        c, d;

  for (c = 0; c < Nc; ++c) {
    for (d = 0; d < dim; ++d) {
      g3[((c * Nc + c) * dim + d) * dim + d] += mu; /* < \nabla v, \nabla u > */
      g3[((c * Nc + d) * dim + d) * dim + c] += mu; /* < \nabla v, {\nabla u}^T > */
    }
  }
}

static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);

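  /* Viscosity-weighted pressure mass matrix, used in SetupEqn() as the (pressure, pressure) block of the Jacobian preconditioner */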
  g0[0] = 1.0 / mu;
}

/* Quadratic MMS Solution
   2D:

     u = x^2 + y^2
     v = 2 x^2 - 2xy
     p = x + y - 1
     f = <1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 4x  4x \
                                  \ 4x -4x /
     div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 2x - 2x = 0

   3D:

     u = 2 x^2 + y^2 + z^2
     v = 2 x^2 - 2xy
     w = 2 x^2 - 2xz
     p = x + y + z - 3/2
     f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu>

   so that

     e(u) = (grad u + grad u^T) = / 8x  4x  4x \
                                  | 4x -4x   0 |
                                  \ 4x   0 -4x /
     div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0
     \nabla \cdot u             = 4x - 2x - 2x = 0
*/
static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt c;

  u[0] = (dim - 1) * PetscSqr(x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSqr(x[c]);
    u[c] = 2.0 * PetscSqr(x[0]) - 2.0 * x[0] * x[c];
  }
  return PETSC_SUCCESS;
}

static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt d;

  u[0] = -0.5 * dim;
  for (d = 0; d < dim; ++d) u[0] += x[d];
  return PETSC_SUCCESS;
}

static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

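  /* f0 = -f, since the weak form above enters the forcing as -< v, f > */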
  f0[0] = (dim - 1) * 4.0 * mu - 1.0;
  for (d = 1; d < dim; ++d) f0[d] = 4.0 * mu - 1.0;
}

/* Trigonometric MMS Solution
   2D:

     u = sin(pi x) + sin(pi y)
     v = -pi cos(pi x) y
     p = sin(2 pi x) + sin(2 pi y)
     f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y>

   so that

     e(u) = (grad u + grad u^T) = /          2pi cos(pi x)            pi cos(pi y) + pi^2 sin(pi x) y \
                                  \ pi cos(pi y) + pi^2 sin(pi x) y            -2pi cos(pi x)         /
     div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0
     \nabla \cdot u             = pi cos(pi x) - pi cos(pi x) = 0

   3D:

     u = 2 sin(pi x) + sin(pi y) + sin(pi z)
     v = -pi cos(pi x) y
     w = -pi cos(pi x) z
     p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z)
     f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z>

   so that

     e(u) = (grad u + grad u^T) = /          4pi cos(pi x)            pi cos(pi y) + pi^2 sin(pi x) y  pi cos(pi z) + pi^2 sin(pi x) z \
                                  | pi cos(pi y) + pi^2 sin(pi x) y            -2pi cos(pi x)                         0               |
                                  \ pi cos(pi z) + pi^2 sin(pi x) z                     0                     -2pi cos(pi x)          /
     div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0
     \nabla \cdot u             = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0
*/
static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt c;

  u[0] = (dim - 1) * PetscSinReal(PETSC_PI * x[0]);
  for (c = 1; c < Nc; ++c) {
    u[0] += PetscSinReal(PETSC_PI * x[c]);
    u[c] = -PETSC_PI * PetscCosReal(PETSC_PI * x[0]) * x[c];
  }
  return PETSC_SUCCESS;
}

static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt d;

  for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0 * PETSC_PI * x[d]);
  return PETSC_SUCCESS;
}

static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[])
{
  const PetscReal mu = PetscRealPart(constants[0]);
  PetscInt        d;

  f0[0] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[0]) - (dim - 1) * mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[0]);
  for (d = 1; d < dim; ++d) {
    f0[0] -= mu * PetscSqr(PETSC_PI) * PetscSinReal(PETSC_PI * x[d]);
    f0[d] = -2.0 * PETSC_PI * PetscCosReal(2.0 * PETSC_PI * x[d]) + mu * PetscPowRealInt(PETSC_PI, 3) * PetscCosReal(PETSC_PI * x[0]) * x[d];
  }
}

static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscInt sol;

  PetscFunctionBeginUser;
  options->sol = SOL_QUADRATIC;
  PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");
  sol = options->sol;
  PetscCall(PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, PETSC_STATIC_ARRAY_LENGTH(SolTypes) - 3, SolTypes[options->sol], &sol, NULL));
  options->sol = (SolType)sol;
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscFunctionBeginUser;
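  /* The mesh is configured entirely from the command line, e.g. -dm_plex_dim, -dm_plex_simplex, -dm_plex_box_faces, and -dm_refine (see the tests below) */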
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx)
{
  Parameter *p;

  PetscFunctionBeginUser;
  /* setup PETSc parameter bag */
  PetscCall(PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag));
  PetscCall(PetscBagGetData(ctx->bag, &p));
  PetscCall(PetscBagSetName(ctx->bag, "par", "Stokes Parameters"));
  PetscCall(PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s"));
  PetscCall(PetscBagSetFromOptions(ctx->bag));
  {
    PetscViewer       viewer;
    PetscViewerFormat format;
    PetscBool         flg;

    PetscCall(PetscOptionsCreateViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg));
    if (flg) {
      PetscCall(PetscViewerPushFormat(viewer, format));
      PetscCall(PetscBagView(ctx->bag, viewer));
      PetscCall(PetscViewerFlush(viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode SetupEqn(DM dm, AppCtx *user)
{
  PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *);
  PetscDS        ds;
  DMLabel        label;
  const PetscInt id = 1;

  PetscFunctionBeginUser;
  PetscCall(DMGetDS(dm, &ds));
  switch (user->sol) {
  case SOL_QUADRATIC:
    PetscCall(PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u));
    exactFuncs[0] = quadratic_u;
    exactFuncs[1] = quadratic_p;
    break;
  case SOL_TRIG:
    PetscCall(PetscDSSetResidual(ds, 0, f0_trig_u, f1_u));
    exactFuncs[0] = trig_u;
    exactFuncs[1] = trig_p;
    break;
  default:
    SETERRQ(PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%d)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol);
  }
  PetscCall(PetscDSSetResidual(ds, 1, f0_p, NULL));
  PetscCall(PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu));
  PetscCall(PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL));
  PetscCall(PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu));
  PetscCall(PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL));

  PetscCall(PetscDSSetExactSolution(ds, 0, exactFuncs[0], user));
  PetscCall(PetscDSSetExactSolution(ds, 1, exactFuncs[1], user));

  PetscCall(DMGetLabel(dm, "marker", &label));
  PetscCall(DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", label, 1, &id, 0, 0, NULL, (PetscVoidFn *)exactFuncs[0], NULL, user, NULL));

  /* Make constant values available to pointwise functions */
  {
    Parameter  *param;
    PetscScalar constants[1];

    PetscCall(PetscBagGetData(user->bag, &param));
    constants[0] = param->mu; /* dynamic shear viscosity, Pa s */
    PetscCall(PetscDSSetConstants(ds, 1, constants));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 0.0;
  return PETSC_SUCCESS;
}
static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, PetscCtx ctx)
{
  PetscInt c;
  for (c = 0; c < Nc; ++c) u[c] = 1.0;
  return PETSC_SUCCESS;
}

static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace)
{
  Vec vec;
  PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, PetscCtx ctx) = {zero, one};

  PetscFunctionBeginUser;
  PetscCheck(origField == 1, PetscObjectComm((PetscObject)dm), PETSC_ERR_ARG_WRONG, "Field %" PetscInt_FMT " should be 1 for pressure", origField);
  funcs[field] = one;
  {
    PetscDS ds;
    PetscCall(DMGetDS(dm, &ds));
    PetscCall(PetscObjectViewFromOptions((PetscObject)ds, NULL, "-ds_view"));
  }
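  /* Project (u, p) = (0, 1): with Dirichlet velocity conditions on the whole boundary, the constant pressure mode spans the null space of the Stokes operator */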
  PetscCall(DMCreateGlobalVector(dm, &vec));
  PetscCall(DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec));
  PetscCall(VecNormalize(vec, NULL));
  PetscCall(MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace));
  PetscCall(VecDestroy(&vec));
  /* New style for field null spaces */
  {
    PetscObject  pressure;
    MatNullSpace nullspacePres;

    PetscCall(DMGetField(dm, field, NULL, &pressure));
    PetscCall(MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres));
    PetscCall(PetscObjectCompose(pressure, "nullspace", (PetscObject)nullspacePres));
    PetscCall(MatNullSpaceDestroy(&nullspacePres));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user)
{
  DM              cdm = dm;
  PetscQuadrature q   = NULL;
  PetscBool       simplex;
  PetscInt        dim, Nf = 2, f, Nc[2];
  const char     *name[2]   = {"velocity", "pressure"};
  const char     *prefix[2] = {"vel_", "pres_"};

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  PetscCall(DMPlexIsSimplex(dm, &simplex));
  Nc[0] = dim;
  Nc[1] = 1;
  for (f = 0; f < Nf; ++f) {
    PetscFE fe;

    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe));
    PetscCall(PetscObjectSetName((PetscObject)fe, name[f]));
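    /* All fields reuse the quadrature of the first (velocity) field so that mixed integrals are evaluated with a single rule */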
    if (!q) PetscCall(PetscFEGetQuadrature(fe, &q));
    PetscCall(PetscFESetQuadrature(fe, q));
    PetscCall(DMSetField(dm, f, NULL, (PetscObject)fe));
    PetscCall(PetscFEDestroy(&fe));
  }
  PetscCall(DMCreateDS(dm));
  PetscCall((*setupEqn)(dm, user));
  while (cdm) {
    PetscCall(DMCopyDisc(dm, cdm));
    PetscCall(DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace));
    PetscCall(DMGetCoarseDM(cdm, &cdm));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

int main(int argc, char **argv)
{
  SNES   snes;
  DM     dm;
  Vec    u;
  AppCtx user;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
  PetscCall(SNESCreate(PetscObjectComm((PetscObject)dm), &snes));
  PetscCall(SNESSetDM(snes, dm));
  PetscCall(DMSetApplicationContext(dm, &user));

  PetscCall(SetupParameters(PETSC_COMM_WORLD, &user));
  PetscCall(SetupProblem(dm, SetupEqn, &user));
  PetscCall(DMPlexCreateClosureIndex(dm, NULL));

  PetscCall(DMCreateGlobalVector(dm, &u));
  PetscCall(DMPlexSetSNESLocalFEM(dm, PETSC_FALSE, &user));
  PetscCall(SNESSetFromOptions(snes));
  PetscCall(DMSNESCheckFromOptions(snes, u));
  PetscCall(PetscObjectSetName((PetscObject)u, "Solution"));
  {
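    /* Attach the constant-pressure null space to the assembled Jacobian so that Krylov solvers can handle the singular pressure block */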
    Mat          J;
    MatNullSpace sp;

    PetscCall(SNESSetUp(snes));
    PetscCall(CreatePressureNullSpace(dm, 1, 1, &sp));
    PetscCall(SNESGetJacobian(snes, &J, NULL, NULL, NULL));
    PetscCall(MatSetNullSpace(J, sp));
    PetscCall(MatNullSpaceDestroy(&sp));
    PetscCall(PetscObjectSetName((PetscObject)J, "Jacobian"));
    PetscCall(MatViewFromOptions(J, NULL, "-J_view"));
  }
  PetscCall(SNESSolve(snes, NULL, u));

  PetscCall(VecDestroy(&u));
  PetscCall(SNESDestroy(&snes));
  PetscCall(DMDestroy(&dm));
  PetscCall(PetscBagDestroy(&user.bag));
  PetscCall(PetscFinalize());
  return 0;
}
/*TEST

  test:
    suffix: 2d_p2_p1_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: triangle
    args: -sol quadratic -dm_refine 2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check
    requires: ctetgen
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_p2_p1_check_parallel
    nsize: {{2 3 5}}
    requires: ctetgen
    args: -sol quadratic -dm_refine 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_p2_p1_conv
    requires: triangle
    # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p2_p1_conv_gamg
    requires: triangle
    args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd

  test:
    suffix: 3d_p2_p1_conv
    requires: ctetgen !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 3d_q2_q1_check
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001

  test:
    suffix: 2d_q2_q1_conv
    # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1]
    args: -sol trig -dm_plex_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_q2_q1_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4]
    args: -sol trig -dm_plex_simplex 0 -dm_plex_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_p3_p2_check
    requires: triangle
    args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 3d_p3_p2_check
    requires: ctetgen !single
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001

  test:
    suffix: 2d_p3_p2_conv
    requires: triangle
    # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0]
    args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 3d_p3_p2_conv
    requires: ctetgen long_runtime
    # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9]
    args: -sol trig -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu

  test:
    suffix: 2d_q1_p0_conv
    requires: !single
    # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \
      -ksp_atol 1e-10 -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0

  test:
    suffix: 3d_q1_p0_conv
    requires: !single
    # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0]
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \
      -ksp_atol 1e-10 -petscds_jac_pre 0 \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_explicit_operator_mat_type aij -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd -fieldsplit_pressure_pc_gamg_aggressive_coarsening 0

  # Stokes preconditioners
  # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
    output_file: output/empty.out
  # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_block_triangular
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi
    output_file: output/empty.out
  # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_diagonal
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
    output_file: output/empty.out
  # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_upper
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_lower
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
    output_file: output/empty.out
  # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_schur_full
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
    output_file: output/empty.out
  # Full Schur + Velocity GMG
  test:
    suffix: 2d_p2_p1_gmg_vcycle
    TODO: broken (requires subDM hooks)
    requires: triangle
    args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -ksp_type fgmres -ksp_atol 1e-9 -snes_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_pc_gamg_esteig_ksp_max_it 10 -fieldsplit_pressure_mg_levels_pc_type sor -fieldsplit_pressure_mg_coarse_pc_type svd
  # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_simple
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
      -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
    output_file: output/empty.out
  # FETI-DP solvers (these solvers are quite inefficient; they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_fetidp
    requires: triangle mumps
    nsize: 5
    args: -sol quadratic -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
    output_file: output/empty.out
  test:
    suffix: 2d_q2_q1_fetidp
    requires: mumps
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
    output_file: output/empty.out
  test:
    suffix: 3d_p2_p1_fetidp
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-9 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
      -fetidp_bddelta_pc_factor_mat_ordering_type external \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
    output_file: output/empty.out
  test:
    suffix: 3d_q2_q1_fetidp
    requires: suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_simplex 0 -dm_plex_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
    output_file: output/empty.out
  # BDDC solvers (these solvers are quite inefficient; they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_bddc
    nsize: 2
    requires: triangle !single
    args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
    output_file: output/empty.out
  # Vanka
  test:
    suffix: 2d_q1_p0_vanka
    output_file: output/empty.out
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_q1_p0_vanka_denseinv
    output_file: output/empty.out
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  # Vanka smoother
  test:
    suffix: 2d_q1_p0_gmg_vanka
    output_file: output/empty.out
    requires: double !complex
    args: -sol quadratic -dm_plex_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type mg \
      -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
      -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
      -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
      -mg_coarse_pc_type svd

TEST*/