1 static char help[] = "Stokes Problem discretized with finite elements,\n\ 2 using a parallel unstructured mesh (DMPLEX) to represent the domain.\n\n\n"; 3 4 /* 5 For the isoviscous Stokes problem, which we discretize using the finite 6 element method on an unstructured mesh, the weak form equations are 7 8 < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > - < v, f > = 0 9 < q, -\nabla\cdot u > = 0 10 11 Viewing: 12 13 To produce nice output, use 14 15 -dm_refine 3 -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -snes_view_solution hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append 16 17 You can get a LaTeX view of the mesh, with point numbering using 18 19 -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0 20 21 The data layout can be viewed using 22 23 -dm_petscsection_view 24 25 Lots of information about the FEM assembly can be printed using 26 27 -dm_plex_print_fem 3 28 */ 29 30 #include <petscdmplex.h> 31 #include <petscsnes.h> 32 #include <petscds.h> 33 #include <petscbag.h> 34 35 // TODO: Plot residual by fields after each smoother iterate 36 37 typedef enum {SOL_QUADRATIC, SOL_TRIG, SOL_UNKNOWN} SolType; 38 const char *SolTypes[] = {"quadratic", "trig", "unknown", "SolType", "SOL_", 0}; 39 40 typedef struct { 41 PetscScalar mu; /* dynamic shear viscosity */ 42 } Parameter; 43 44 typedef struct { 45 /* Domain and mesh definition */ 46 char filename[PETSC_MAX_PATH_LEN]; 47 /* Problem definition */ 48 PetscBag bag; /* Problem parameters */ 49 SolType sol; /* MMS solution */ 50 } AppCtx; 51 52 static void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 53 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 54 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 55 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[]) 56 { 57 const PetscReal 
mu = PetscRealPart(constants[0]); 58 const PetscInt Nc = uOff[1]-uOff[0]; 59 PetscInt c, d; 60 61 for (c = 0; c < Nc; ++c) { 62 for (d = 0; d < dim; ++d) { 63 f1[c*dim+d] = mu * (u_x[c*dim+d] + u_x[d*dim+c]); 64 } 65 f1[c*dim+c] -= u[uOff[1]]; 66 } 67 } 68 69 static void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, 70 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 71 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 72 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 73 { 74 PetscInt d; 75 for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] -= u_x[d*dim+d]; 76 } 77 78 static void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, 79 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 80 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 81 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[]) 82 { 83 PetscInt d; 84 for (d = 0; d < dim; ++d) g1[d*dim+d] = -1.0; /* < q, -\nabla\cdot u > */ 85 } 86 87 static void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, 88 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 89 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 90 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[]) 91 { 92 PetscInt d; 93 for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* -< \nabla\cdot v, p > */ 94 } 95 96 static void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, 97 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], 
const PetscScalar u_t[], const PetscScalar u_x[], 98 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 99 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[]) 100 { 101 const PetscReal mu = PetscRealPart(constants[0]); 102 const PetscInt Nc = uOff[1]-uOff[0]; 103 PetscInt c, d; 104 105 for (c = 0; c < Nc; ++c) { 106 for (d = 0; d < dim; ++d) { 107 g3[((c*Nc+c)*dim+d)*dim+d] += mu; /* < \nabla v, \nabla u > */ 108 g3[((c*Nc+d)*dim+d)*dim+c] += mu; /* < \nabla v, {\nabla u}^T > */ 109 } 110 } 111 } 112 113 static void g0_pp(PetscInt dim, PetscInt Nf, PetscInt NfAux, 114 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 115 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 116 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 117 { 118 const PetscReal mu = PetscRealPart(constants[0]); 119 120 g0[0] = 1.0/mu; 121 } 122 123 /* Quadratic MMS Solution 124 2D: 125 126 u = x^2 + y^2 127 v = 2 x^2 - 2xy 128 p = x + y - 1 129 f = <1 - 4 mu, 1 - 4 mu> 130 131 so that 132 133 e(u) = (grad u + grad u^T) = / 4x 4x \ 134 \ 4x -4x / 135 div mu e(u) - \nabla p + f = mu <4, 4> - <1, 1> + <1 - 4 mu, 1 - 4 mu> = 0 136 \nabla \cdot u = 2x - 2x = 0 137 138 3D: 139 140 u = 2 x^2 + y^2 + z^2 141 v = 2 x^2 - 2xy 142 w = 2 x^2 - 2xz 143 p = x + y + z - 3/2 144 f = <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> 145 146 so that 147 148 e(u) = (grad u + grad u^T) = / 8x 4x 4x \ 149 | 4x -4x 0 | 150 \ 4x 0 -4x / 151 div mu e(u) - \nabla p + f = mu <8, 4, 4> - <1, 1, 1> + <1 - 8 mu, 1 - 4 mu, 1 - 4 mu> = 0 152 \nabla \cdot u = 4x - 2x - 2x = 0 153 */ 154 static PetscErrorCode quadratic_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, 
PetscScalar *u, void *ctx) 155 { 156 PetscInt c; 157 158 u[0] = (dim-1)*PetscSqr(x[0]); 159 for (c = 1; c < Nc; ++c) { 160 u[0] += PetscSqr(x[c]); 161 u[c] = 2.0*PetscSqr(x[0]) - 2.0*x[0]*x[c]; 162 } 163 return 0; 164 } 165 166 static PetscErrorCode quadratic_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) 167 { 168 PetscInt d; 169 170 u[0] = -0.5*dim; 171 for (d = 0; d < dim; ++d) u[0] += x[d]; 172 return 0; 173 } 174 175 static void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 176 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 177 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 178 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 179 { 180 const PetscReal mu = PetscRealPart(constants[0]); 181 PetscInt d; 182 183 f0[0] = (dim-1)*4.0*mu - 1.0; 184 for (d = 1; d < dim; ++d) f0[d] = 4.0*mu - 1.0; 185 } 186 187 /* Trigonometric MMS Solution 188 2D: 189 190 u = sin(pi x) + sin(pi y) 191 v = -pi cos(pi x) y 192 p = sin(2 pi x) + sin(2 pi y) 193 f = <2pi cos(2 pi x) + mu pi^2 sin(pi x) + mu pi^2 sin(pi y), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y> 194 195 so that 196 197 e(u) = (grad u + grad u^T) = / 2pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y \ 198 \ pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) / 199 div mu e(u) - \nabla p + f = mu <-pi^2 sin(pi x) - pi^2 sin(pi y), pi^3 cos(pi x) y> - <2pi cos(2 pi x), 2pi cos(2 pi y)> + <f_x, f_y> = 0 200 \nabla \cdot u = pi cos(pi x) - pi cos(pi x) = 0 201 202 3D: 203 204 u = 2 sin(pi x) + sin(pi y) + sin(pi z) 205 v = -pi cos(pi x) y 206 w = -pi cos(pi x) z 207 p = sin(2 pi x) + sin(2 pi y) + sin(2 pi z) 208 f = <2pi cos(2 pi x) + mu 2pi^2 sin(pi x) + mu pi^2 sin(pi y) + mu pi^2 sin(pi z), 2pi cos(2 pi y) - mu pi^3 cos(pi x) y, 2pi cos(2 pi z) - mu pi^3 cos(pi x) z> 209 210 
so that 211 212 e(u) = (grad u + grad u^T) = / 4pi cos(pi x) pi cos(pi y) + pi^2 sin(pi x) y pi cos(pi z) + pi^2 sin(pi x) z \ 213 | pi cos(pi y) + pi^2 sin(pi x) y -2pi cos(pi x) 0 | 214 \ pi cos(pi z) + pi^2 sin(pi x) z 0 -2pi cos(pi x) / 215 div mu e(u) - \nabla p + f = mu <-2pi^2 sin(pi x) - pi^2 sin(pi y) - pi^2 sin(pi z), pi^3 cos(pi x) y, pi^3 cos(pi x) z> - <2pi cos(2 pi x), 2pi cos(2 pi y), 2pi cos(2 pi z)> + <f_x, f_y, f_z> = 0 216 \nabla \cdot u = 2 pi cos(pi x) - pi cos(pi x) - pi cos(pi x) = 0 217 */ 218 static PetscErrorCode trig_u(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) 219 { 220 PetscInt c; 221 222 u[0] = (dim-1)*PetscSinReal(PETSC_PI*x[0]); 223 for (c = 1; c < Nc; ++c) { 224 u[0] += PetscSinReal(PETSC_PI*x[c]); 225 u[c] = -PETSC_PI*PetscCosReal(PETSC_PI*x[0]) * x[c]; 226 } 227 return 0; 228 } 229 230 static PetscErrorCode trig_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) 231 { 232 PetscInt d; 233 234 for (d = 0, u[0] = 0.0; d < dim; ++d) u[0] += PetscSinReal(2.0*PETSC_PI*x[d]); 235 return 0; 236 } 237 238 static void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 239 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 240 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 241 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 242 { 243 const PetscReal mu = PetscRealPart(constants[0]); 244 PetscInt d; 245 246 f0[0] = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[0]) - (dim-1)*mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[0]); 247 for (d = 1; d < dim; ++d) { 248 f0[0] -= mu*PetscSqr(PETSC_PI)*PetscSinReal(PETSC_PI*x[d]); 249 f0[d] = -2.0*PETSC_PI*PetscCosReal(2.0*PETSC_PI*x[d]) + mu*PetscPowRealInt(PETSC_PI, 3)*PetscCosReal(PETSC_PI*x[0])*x[d]; 250 } 251 } 252 
253 static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options) 254 { 255 PetscInt sol; 256 PetscErrorCode ierr; 257 258 PetscFunctionBeginUser; 259 options->filename[0] = '\0'; 260 options->sol = SOL_QUADRATIC; 261 262 ierr = PetscOptionsBegin(comm, "", "Stokes Problem Options", "DMPLEX");CHKERRQ(ierr); 263 sol = options->sol; 264 ierr = PetscOptionsEList("-sol", "The MMS solution", "ex62.c", SolTypes, (sizeof(SolTypes)/sizeof(SolTypes[0]))-3, SolTypes[options->sol], &sol, NULL);CHKERRQ(ierr); 265 options->sol = (SolType) sol; 266 ierr = PetscOptionsEnd(); 267 PetscFunctionReturn(0); 268 } 269 270 static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm) 271 { 272 size_t len; 273 PetscErrorCode ierr; 274 275 PetscFunctionBeginUser; 276 ierr = PetscStrlen(user->filename, &len);CHKERRQ(ierr); 277 if (len) {ierr = DMPlexCreateFromFile(comm, user->filename, PETSC_TRUE, dm);CHKERRQ(ierr);} 278 else {ierr = DMPlexCreateBoxMesh(comm, 2, PETSC_TRUE, NULL, NULL, NULL, NULL, PETSC_TRUE, dm);CHKERRQ(ierr);} 279 ierr = DMSetFromOptions(*dm);CHKERRQ(ierr); 280 ierr = DMViewFromOptions(*dm, NULL, "-dm_view");CHKERRQ(ierr); 281 PetscFunctionReturn(0); 282 } 283 284 static PetscErrorCode SetupParameters(MPI_Comm comm, AppCtx *ctx) 285 { 286 Parameter *p; 287 PetscErrorCode ierr; 288 289 PetscFunctionBeginUser; 290 /* setup PETSc parameter bag */ 291 ierr = PetscBagCreate(PETSC_COMM_SELF, sizeof(Parameter), &ctx->bag);CHKERRQ(ierr); 292 ierr = PetscBagGetData(ctx->bag, (void **) &p);CHKERRQ(ierr); 293 ierr = PetscBagSetName(ctx->bag, "par", "Stokes Parameters");CHKERRQ(ierr); 294 ierr = PetscBagRegisterScalar(ctx->bag, &p->mu, 1.0, "mu", "Dynamic Shear Viscosity, Pa s");CHKERRQ(ierr); 295 ierr = PetscBagSetFromOptions(ctx->bag);CHKERRQ(ierr); 296 { 297 PetscViewer viewer; 298 PetscViewerFormat format; 299 PetscBool flg; 300 301 ierr = PetscOptionsGetViewer(comm, NULL, NULL, "-param_view", &viewer, &format, &flg);CHKERRQ(ierr); 302 if (flg) { 303 ierr = 
PetscViewerPushFormat(viewer, format);CHKERRQ(ierr); 304 ierr = PetscBagView(ctx->bag, viewer);CHKERRQ(ierr); 305 ierr = PetscViewerFlush(viewer);CHKERRQ(ierr); 306 ierr = PetscViewerPopFormat(viewer);CHKERRQ(ierr); 307 ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr); 308 } 309 } 310 PetscFunctionReturn(0); 311 } 312 313 static PetscErrorCode SetupEqn(DM dm, AppCtx *user) 314 { 315 PetscErrorCode (*exactFuncs[2])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *); 316 PetscDS ds; 317 const PetscInt id = 1; 318 PetscErrorCode ierr; 319 320 PetscFunctionBeginUser; 321 ierr = DMGetDS(dm, &ds);CHKERRQ(ierr); 322 switch (user->sol) { 323 case SOL_QUADRATIC: 324 ierr = PetscDSSetResidual(ds, 0, f0_quadratic_u, f1_u);CHKERRQ(ierr); 325 exactFuncs[0] = quadratic_u; 326 exactFuncs[1] = quadratic_p; 327 break; 328 case SOL_TRIG: 329 ierr = PetscDSSetResidual(ds, 0, f0_trig_u, f1_u);CHKERRQ(ierr); 330 exactFuncs[0] = trig_u; 331 exactFuncs[1] = trig_p; 332 break; 333 default: SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", SolTypes[PetscMin(user->sol, SOL_UNKNOWN)], user->sol); 334 } 335 ierr = PetscDSSetResidual(ds, 1, f0_p, NULL);CHKERRQ(ierr); 336 ierr = PetscDSSetJacobian(ds, 0, 0, NULL, NULL, NULL, g3_uu);CHKERRQ(ierr); 337 ierr = PetscDSSetJacobian(ds, 0, 1, NULL, NULL, g2_up, NULL);CHKERRQ(ierr); 338 ierr = PetscDSSetJacobian(ds, 1, 0, NULL, g1_pu, NULL, NULL);CHKERRQ(ierr); 339 ierr = PetscDSSetJacobianPreconditioner(ds, 0, 0, NULL, NULL, NULL, g3_uu);CHKERRQ(ierr); 340 ierr = PetscDSSetJacobianPreconditioner(ds, 1, 1, g0_pp, NULL, NULL, NULL);CHKERRQ(ierr); 341 342 ierr = PetscDSSetExactSolution(ds, 0, exactFuncs[0], user);CHKERRQ(ierr); 343 ierr = PetscDSSetExactSolution(ds, 1, exactFuncs[1], user);CHKERRQ(ierr); 344 345 ierr = DMAddBoundary(dm, DM_BC_ESSENTIAL, "wall", "marker", 0, 0, NULL, (void (*)(void)) exactFuncs[0], NULL, 1, &id, user);CHKERRQ(ierr); 346 347 /* Make constant values 
available to pointwise functions */ 348 { 349 Parameter *param; 350 PetscScalar constants[1]; 351 352 ierr = PetscBagGetData(user->bag, (void **) ¶m);CHKERRQ(ierr); 353 constants[0] = param->mu; /* dynamic shear viscosity, Pa s */ 354 ierr = PetscDSSetConstants(ds, 1, constants);CHKERRQ(ierr); 355 } 356 PetscFunctionReturn(0); 357 } 358 359 static PetscErrorCode zero(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) 360 { 361 PetscInt c; 362 for (c = 0; c < Nc; ++c) u[c] = 0.0; 363 return 0; 364 } 365 static PetscErrorCode one(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nc, PetscScalar *u, void *ctx) 366 { 367 PetscInt c; 368 for (c = 0; c < Nc; ++c) u[c] = 1.0; 369 return 0; 370 } 371 372 static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt origField, PetscInt field, MatNullSpace *nullspace) 373 { 374 Vec vec; 375 PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void* ctx) = {zero, one}; 376 PetscErrorCode ierr; 377 378 PetscFunctionBeginUser; 379 if (origField != 1) SETERRQ1(PetscObjectComm((PetscObject) dm), PETSC_ERR_ARG_WRONG, "Field %D should be 1 for pressure", origField); 380 funcs[field] = one; 381 { 382 PetscDS ds; 383 ierr = DMGetDS(dm, &ds);CHKERRQ(ierr); 384 ierr = PetscObjectViewFromOptions((PetscObject) ds, NULL, "-ds_view");CHKERRQ(ierr); 385 } 386 ierr = DMCreateGlobalVector(dm, &vec);CHKERRQ(ierr); 387 ierr = DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);CHKERRQ(ierr); 388 ierr = VecNormalize(vec, NULL);CHKERRQ(ierr); 389 ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace);CHKERRQ(ierr); 390 ierr = VecDestroy(&vec);CHKERRQ(ierr); 391 /* New style for field null spaces */ 392 { 393 PetscObject pressure; 394 MatNullSpace nullspacePres; 395 396 ierr = DMGetField(dm, field, NULL, &pressure);CHKERRQ(ierr); 397 ierr = MatNullSpaceCreate(PetscObjectComm(pressure), 
PETSC_TRUE, 0, NULL, &nullspacePres);CHKERRQ(ierr); 398 ierr = PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres);CHKERRQ(ierr); 399 ierr = MatNullSpaceDestroy(&nullspacePres);CHKERRQ(ierr); 400 } 401 PetscFunctionReturn(0); 402 } 403 404 static PetscErrorCode SetupProblem(DM dm, PetscErrorCode (*setupEqn)(DM, AppCtx *), AppCtx *user) 405 { 406 DM cdm = dm; 407 PetscQuadrature q = NULL; 408 DMPolytopeType ct; 409 PetscBool simplex; 410 PetscInt dim, Nf = 2, f, Nc[2], cStart; 411 const char *name[2] = {"velocity", "pressure"}; 412 const char *prefix[2] = {"vel_", "pres_"}; 413 PetscErrorCode ierr; 414 415 PetscFunctionBegin; 416 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 417 ierr = DMPlexGetHeightStratum(dm, 0, &cStart, NULL);CHKERRQ(ierr); 418 ierr = DMPlexGetCellType(dm, cStart, &ct);CHKERRQ(ierr); 419 simplex = DMPolytopeTypeGetNumVertices(ct) == DMPolytopeTypeGetDim(ct)+1 ? PETSC_TRUE : PETSC_FALSE; 420 Nc[0] = dim; 421 Nc[1] = 1; 422 for (f = 0; f < Nf; ++f) { 423 PetscFE fe; 424 425 ierr = PetscFECreateDefault(PETSC_COMM_SELF, dim, Nc[f], simplex, prefix[f], -1, &fe);CHKERRQ(ierr); 426 ierr = PetscObjectSetName((PetscObject) fe, name[f]);CHKERRQ(ierr); 427 if (!q) {ierr = PetscFEGetQuadrature(fe, &q);CHKERRQ(ierr);} 428 ierr = PetscFESetQuadrature(fe, q);CHKERRQ(ierr); 429 ierr = DMSetField(dm, f, NULL, (PetscObject) fe);CHKERRQ(ierr); 430 ierr = PetscFEDestroy(&fe);CHKERRQ(ierr); 431 } 432 ierr = DMCreateDS(dm);CHKERRQ(ierr); 433 ierr = (*setupEqn)(dm, user);CHKERRQ(ierr); 434 while (cdm) { 435 ierr = DMCopyDisc(dm, cdm);CHKERRQ(ierr); 436 ierr = DMSetNullSpaceConstructor(cdm, 1, CreatePressureNullSpace);CHKERRQ(ierr); 437 ierr = DMGetCoarseDM(cdm, &cdm);CHKERRQ(ierr); 438 } 439 PetscFunctionReturn(0); 440 } 441 442 int main(int argc, char **argv) 443 { 444 SNES snes; 445 DM dm; 446 Vec u; 447 AppCtx user; 448 PetscErrorCode ierr; 449 450 ierr = PetscInitialize(&argc, &argv, NULL, help);if (ierr) return ierr; 451 ierr = 
ProcessOptions(PETSC_COMM_WORLD, &user);CHKERRQ(ierr); 452 ierr = CreateMesh(PETSC_COMM_WORLD, &user, &dm);CHKERRQ(ierr); 453 ierr = SNESCreate(PetscObjectComm((PetscObject) dm), &snes);CHKERRQ(ierr); 454 ierr = SNESSetDM(snes, dm);CHKERRQ(ierr); 455 ierr = DMSetApplicationContext(dm, &user);CHKERRQ(ierr); 456 457 ierr = SetupParameters(PETSC_COMM_WORLD, &user);CHKERRQ(ierr); 458 ierr = SetupProblem(dm, SetupEqn, &user);CHKERRQ(ierr); 459 ierr = DMPlexCreateClosureIndex(dm, NULL);CHKERRQ(ierr); 460 461 ierr = DMCreateGlobalVector(dm, &u);CHKERRQ(ierr); 462 ierr = DMPlexSetSNESLocalFEM(dm, &user, &user, &user);CHKERRQ(ierr); 463 ierr = SNESSetFromOptions(snes);CHKERRQ(ierr); 464 ierr = DMSNESCheckFromOptions(snes, u);CHKERRQ(ierr); 465 ierr = PetscObjectSetName((PetscObject) u, "Solution");CHKERRQ(ierr); 466 { 467 Mat J; 468 MatNullSpace sp; 469 470 ierr = SNESSetUp(snes);CHKERRQ(ierr); 471 ierr = CreatePressureNullSpace(dm, 1, 1, &sp);CHKERRQ(ierr); 472 ierr = SNESGetJacobian(snes, &J, NULL, NULL, NULL);CHKERRQ(ierr); 473 ierr = MatSetNullSpace(J, sp);CHKERRQ(ierr); 474 ierr = MatNullSpaceDestroy(&sp);CHKERRQ(ierr); 475 ierr = PetscObjectSetName((PetscObject) J, "Jacobian");CHKERRQ(ierr); 476 ierr = MatViewFromOptions(J, NULL, "-J_view");CHKERRQ(ierr); 477 } 478 ierr = SNESSolve(snes, NULL, u);CHKERRQ(ierr); 479 480 ierr = VecDestroy(&u);CHKERRQ(ierr); 481 ierr = SNESDestroy(&snes);CHKERRQ(ierr); 482 ierr = DMDestroy(&dm);CHKERRQ(ierr); 483 ierr = PetscBagDestroy(&user.bag);CHKERRQ(ierr); 484 ierr = PetscFinalize(); 485 return ierr; 486 } 487 /*TEST 488 489 test: 490 suffix: 2d_p2_p1_check 491 requires: triangle 492 args: -sol quadratic -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 493 494 test: 495 suffix: 2d_p2_p1_check_parallel 496 nsize: {{2 3 5}} 497 requires: triangle 498 args: -sol quadratic -dm_refine 2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 499 500 test: 
501 suffix: 3d_p2_p1_check 502 requires: ctetgen 503 args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 504 505 test: 506 suffix: 3d_p2_p1_check_parallel 507 nsize: {{2 3 5}} 508 requires: ctetgen 509 args: -sol quadratic -dm_refine 2 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 510 511 test: 512 suffix: 2d_p2_p1_conv 513 requires: triangle 514 # Using -dm_refine 3 gives L_2 convergence rate: [3.0, 2.1] 515 args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \ 516 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 517 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 518 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 519 520 test: 521 suffix: 2d_p2_p1_conv_gamg 522 requires: triangle 523 args: -sol trig -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \ 524 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \ 525 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd 526 527 test: 528 suffix: 3d_p2_p1_conv 529 requires: ctetgen !single 530 # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.8] 531 args: -sol trig -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \ 532 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 533 -pc_type fieldsplit -pc_fieldsplit_type 
schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 534 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 535 536 test: 537 suffix: 2d_q2_q1_check 538 args: -sol quadratic -dm_plex_box_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 539 540 test: 541 suffix: 3d_q2_q1_check 542 args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 543 544 test: 545 suffix: 2d_q2_q1_conv 546 # Using -dm_refine 3 -convest_num_refine 1 gives L_2 convergence rate: [3.0, 2.1] 547 args: -sol trig -dm_plex_box_simplex 0 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 -ksp_error_if_not_converged \ 548 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 549 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 550 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 551 552 test: 553 suffix: 3d_q2_q1_conv 554 requires: !single 555 # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [2.8, 2.4] 556 args: -sol trig -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_convergence_estimate -convest_num_refine 1 \ 557 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 558 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 559 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 560 561 test: 562 suffix: 2d_p3_p2_check 563 requires: triangle 564 args: -sol quadratic -vel_petscspace_degree 3 -pres_petscspace_degree 2 
-dmsnes_check 0.0001 565 566 test: 567 suffix: 3d_p3_p2_check 568 requires: ctetgen !single 569 args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -dmsnes_check 0.0001 570 571 test: 572 suffix: 2d_p3_p2_conv 573 requires: triangle 574 # Using -dm_refine 2 gives L_2 convergence rate: [3.8, 3.0] 575 args: -sol trig -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 -ksp_error_if_not_converged \ 576 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 577 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 578 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 579 580 test: 581 suffix: 3d_p3_p2_conv 582 requires: ctetgen long_runtime 583 # Using -dm_refine 1 -convest_num_refine 2 gives L_2 convergence rate: [3.6, 3.9] 584 args: -sol trig -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 -snes_convergence_estimate -convest_num_refine 2 \ 585 -ksp_atol 1e-10 -ksp_error_if_not_converged -pc_use_amat \ 586 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition a11 -pc_fieldsplit_off_diag_use_amat \ 587 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type lu 588 589 test: 590 suffix: 2d_q1_p0_conv 591 requires: !single 592 # Using -dm_refine 3 gives L_2 convergence rate: [1.9, 1.0] 593 args: -sol quadratic -dm_plex_box_simplex 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 2 \ 594 -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \ 595 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \ 596 -fieldsplit_velocity_pc_type lu 
-fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd 597 598 test: 599 suffix: 3d_q1_p0_conv 600 requires: !single 601 # Using -dm_refine 2 -convest_num_refine 2 gives L_2 convergence rate: [1.7, 1.0] 602 args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_refine 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -snes_convergence_estimate -convest_num_refine 1 \ 603 -ksp_atol 1e-10 -ksp_error_if_not_converged -petscds_jac_pre 0 \ 604 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_schur_precondition full \ 605 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_levels_pc_type jacobi -fieldsplit_pressure_mg_coarse_pc_type svd 606 607 # Stokes preconditioners 608 # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix} 609 test: 610 suffix: 2d_p2_p1_block_diagonal 611 requires: triangle 612 args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \ 613 -snes_error_if_not_converged \ 614 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -ksp_error_if_not_converged \ 615 -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi 616 # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix} 617 test: 618 suffix: 2d_p2_p1_block_triangular 619 requires: triangle 620 args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \ 621 -snes_error_if_not_converged \ 622 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 623 -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi 624 # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} 625 test: 626 
suffix: 2d_p2_p1_schur_diagonal 627 requires: triangle 628 args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \ 629 -snes_error_if_not_converged \ 630 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \ 631 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -pc_fieldsplit_off_diag_use_amat \ 632 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 633 # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix} 634 test: 635 suffix: 2d_p2_p1_schur_upper 636 requires: triangle 637 args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -dmsnes_check 0.0001 \ 638 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \ 639 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -pc_fieldsplit_off_diag_use_amat \ 640 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 641 # Lower triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix} 642 test: 643 suffix: 2d_p2_p1_schur_lower 644 requires: triangle 645 args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \ 646 -snes_error_if_not_converged \ 647 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \ 648 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -pc_fieldsplit_off_diag_use_amat \ 649 -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 650 # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix} 651 test: 652 suffix: 2d_p2_p1_schur_full 653 requires: triangle 654 
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi
  # Full Schur + Velocity GMG
  test:
    suffix: 2d_p2_p1_gmg_vcycle
    requires: triangle
    args: -sol quadratic -dm_refine_hierarchy 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_atol 1e-9 -ksp_error_if_not_converged -pc_use_amat \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_fact_type full -pc_fieldsplit_off_diag_use_amat \
      -fieldsplit_velocity_pc_type mg -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type gamg -fieldsplit_pressure_mg_coarse_pc_type svd
  # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix}
  test:
    suffix: 2d_p2_p1_simple
    requires: triangle
    args: -sol quadratic -dm_refine 2 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \
      -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi \
      -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi
  # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_fetidp
    requires: triangle mumps
    nsize: 5
    args: -sol quadratic -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 2d_q2_q1_fetidp
    requires: mumps
    nsize: 5
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -fetidp_fieldsplit_lag_ksp_type preonly
  test:
    suffix: 3d_p2_p1_fetidp
    requires: ctetgen mumps suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_sub_schurs_mat_solver_type mumps -fetidp_bddc_sub_schurs_mat_mumps_icntl_14 100000 \
      -fetidp_bddelta_pc_factor_mat_ordering_type external \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  test:
    suffix: 3d_q2_q1_fetidp
    requires: suitesparse
    nsize: 5
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_plex_box_dim 3 -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -ksp_fetidp_saddlepoint -fetidp_ksp_type cg \
      -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none \
      -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky \
      -fetidp_bddc_pc_bddc_symmetric -fetidp_fieldsplit_lag_ksp_type preonly \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack \
      -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_ordering_type external -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_ordering_type external
  # BDDC solvers (these solvers are quite inefficient, they are here to exercise the code)
  test:
    suffix: 2d_p2_p1_bddc
    nsize: 2
    requires: triangle !single
    args: -sol quadratic -dm_plex_box_faces 2,2,2 -dm_refine 1 -dm_mat_type is -dm_distribute -petscpartitioner_type simple -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \
      -snes_error_if_not_converged \
      -ksp_type gmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-8 -ksp_error_if_not_converged \
      -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd
  # Vanka
  test:
    suffix: 2d_q1_p0_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -sub_ksp_type preonly -sub_pc_type lu
  test:
    suffix: 2d_q1_p0_vanka_denseinv
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \
      -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense
  # Vanka smoother
  test:
    suffix: 2d_q1_p0_gmg_vanka
    requires: double !complex
    args: -sol quadratic -dm_plex_box_simplex 0 -dm_refine_hierarchy 2 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \
      -snes_rtol 1.0e-4 \
      -ksp_type fgmres -ksp_atol 1e-5 -ksp_error_if_not_converged \
      -pc_type mg \
      -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 \
      -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \
      -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \
      -mg_coarse_pc_type svd

TEST*/