1 static char help[] = "Stokes Problem in 2d and 3d with simplicial finite elements.\n\ 2 We solve the Stokes problem in a rectangular\n\ 3 domain, using a parallel unstructured mesh (DMPLEX) to discretize it.\n\n\n"; 4 5 /* 6 The isoviscous Stokes problem, which we discretize using the finite 7 element method on an unstructured mesh. The weak form equations are 8 9 < \nabla v, \nabla u + {\nabla u}^T > - < \nabla\cdot v, p > + < v, f > = 0 10 < q, \nabla\cdot u > = 0 11 12 Viewing: 13 14 To produce nice output, use 15 16 -dm_refine 3 -show_error -dm_view hdf5:sol1.h5 -error_vec_view hdf5:sol1.h5::append -sol_vec_view hdf5:sol1.h5::append -exact_vec_view hdf5:sol1.h5::append 17 18 You can get a LaTeX view of the mesh, with point numbering using 19 20 -dm_view :mesh.tex:ascii_latex -dm_plex_view_scale 8.0 21 22 The data layout can be viewed using 23 24 -dm_petscsection_view 25 26 Lots of information about the FEM assembly can be printed using 27 28 -dm_plex_print_fem 2 29 30 Field Data: 31 32 DMPLEX data is organized by point, and the closure operation just stacks up the 33 data from each sieve point in the closure. Thus, for a P_2-P_1 Stokes element, we 34 have 35 36 cl{e} = {f e_0 e_1 e_2 v_0 v_1 v_2} 37 x = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} p_{v_0} u_{v_1} v_{v_1} p_{v_1} u_{v_2} v_{v_2} p_{v_2}] 38 39 The problem here is that we would like to loop over each field separately for 40 integration. Therefore, the closure visitor in DMPlexVecGetClosure() reorders 41 the data so that each field is contiguous 42 43 x' = [u_{e_0} v_{e_0} u_{e_1} v_{e_1} u_{e_2} v_{e_2} u_{v_0} v_{v_0} u_{v_1} v_{v_1} u_{v_2} v_{v_2} p_{v_0} p_{v_1} p_{v_2}] 44 45 Likewise, DMPlexVecSetClosure() takes data partitioned by field, and correctly 46 puts it into the Sieve ordering. 
47 48 TODO: 49 - Update FETI test output 50 - Reorder mesh 51 - Check the q1-p0 Vanka domains are correct (I think its correct) 52 - Check scaling of iterates, right now it is bad 53 - Check the q2-q1 domains since convergence is bad 54 - Ask Patrick about domains 55 - Plot residual by fields after each smoother iterate 56 - Get Diskin checks going 57 */ 58 59 #include <petscdmplex.h> 60 #include <petscsnes.h> 61 #include <petscds.h> 62 63 PetscInt spatialDim = 0; 64 65 typedef enum {NEUMANN, DIRICHLET, NUM_BC_TYPES} BCType; 66 const char *bcTypes[NUM_BC_TYPES+1] = {"neumann", "dirichlet", "unknown"}; 67 typedef enum {RUN_FULL, RUN_TEST, NUM_RUN_TYPES} RunType; 68 const char *runTypes[NUM_RUN_TYPES+1] = {"full", "test", "unknown"}; 69 typedef enum {SOL_QUADRATIC, SOL_CUBIC, SOL_TRIG, NUM_SOL_TYPES} SolType; 70 const char *solTypes[NUM_SOL_TYPES+1] = {"quadratic", "cubic", "trig", "unknown"}; 71 72 typedef struct { 73 PetscInt debug; /* The debugging level */ 74 RunType runType; /* Whether to run tests, or solve the full problem */ 75 PetscLogEvent createMeshEvent; 76 PetscBool showInitial, showError; 77 /* Domain and mesh definition */ 78 PetscInt dim; /* The topological mesh dimension */ 79 PetscBool interpolate; /* Generate intermediate mesh elements */ 80 PetscBool simplex; /* Use simplices or tensor product cells */ 81 PetscInt cells[3]; /* The initial domain division */ 82 PetscReal refinementLimit; /* The largest allowable cell volume */ 83 PetscBool testPartition; /* Use a fixed partitioning for testing */ 84 /* Problem definition */ 85 BCType bcType; 86 SolType solType; 87 PetscErrorCode (**exactFuncs)(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx); 88 } AppCtx; 89 90 PetscErrorCode zero_scalar(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) 91 { 92 u[0] = 0.0; 93 return 0; 94 } 95 PetscErrorCode zero_vector(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, 
PetscScalar *u, void *ctx) 96 { 97 PetscInt d; 98 for (d = 0; d < spatialDim; ++d) u[d] = 0.0; 99 return 0; 100 } 101 102 /* 103 In 2D we use exact solution: 104 105 u = x^2 + y^2 106 v = 2 x^2 - 2xy 107 p = x + y - 1 108 f_x = f_y = 3 109 110 so that 111 112 -\Delta u + \nabla p + f = <-4, -4> + <1, 1> + <3, 3> = 0 113 \nabla \cdot u = 2x - 2x = 0 114 */ 115 PetscErrorCode quadratic_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) 116 { 117 u[0] = x[0]*x[0] + x[1]*x[1]; 118 u[1] = 2.0*x[0]*x[0] - 2.0*x[0]*x[1]; 119 return 0; 120 } 121 122 PetscErrorCode linear_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx) 123 { 124 *p = x[0] + x[1] - 1.0; 125 return 0; 126 } 127 PetscErrorCode constant_p(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx) 128 { 129 *p = 1.0; 130 return 0; 131 } 132 133 /* 134 In 2D we use exact solution: 135 136 u = x^3 + y^3 137 v = 2 x^3 - 3 x^2 y 138 p = 3/2 x^2 + 3/2 y^2 - 1 139 f_x = 6 (x + y) 140 f_y = 12 x - 3 y 141 142 so that 143 144 -\Delta u + \nabla p + f = <-6 x - 6 y, -12 x + 6 y> + <3 x, 3 y> + <6 (x + y), 12 x - 6 y> = 0 145 \nabla \cdot u = 3 x^2 - 3 x^2 = 0 146 */ 147 PetscErrorCode cubic_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) 148 { 149 u[0] = x[0]*x[0]*x[0] + x[1]*x[1]*x[1]; 150 u[1] = 2.0*x[0]*x[0]*x[0] - 3.0*x[0]*x[0]*x[1]; 151 return 0; 152 } 153 154 PetscErrorCode quadratic_p_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx) 155 { 156 *p = 3.0*x[0]*x[0]/2.0 + 3.0*x[1]*x[1]/2.0 - 1.0; 157 return 0; 158 } 159 160 /* 161 In 2D we use exact solution: 162 163 u = sin(n pi x) + y^2 164 v = -sin(n pi y) 165 p = 3/2 x^2 + 3/2 y^2 - 1 166 f_x = 4 - 3x - n^2 pi^2 sin (n pi x) 167 f_y = - 3y + n^2 pi^2 sin(n pi y) 168 169 so that 170 171 -\Delta u + \nabla p + f = <n^2 pi^2 sin (n pi x) - 4, -n^2 pi^2 
sin(n pi y)> + <3 x, 3 y> + <4 - 3x - n^2 pi^2 sin (n pi x), -3y + n^2 pi^2 sin(n pi y)> = 0 172 \nabla \cdot u = n pi cos(n pi x) - n pi cos(n pi y) = 0 173 */ 174 PetscErrorCode trig_u_2d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) 175 { 176 const PetscReal n = 1.0; 177 178 u[0] = PetscSinReal(n*PETSC_PI*x[0]) + x[1]*x[1]; 179 u[1] = -PetscSinReal(n*PETSC_PI*x[1]); 180 return 0; 181 } 182 183 void f0_quadratic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 184 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 185 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 186 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 187 { 188 PetscInt c; 189 for (c = 0; c < dim; ++c) f0[c] = 3.0; 190 } 191 192 void f0_cubic_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 193 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 194 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 195 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 196 { 197 f0[0] = 3.0*x[0] + 6.0*x[1]; 198 f0[1] = 12.0*x[0] - 9.0*x[1]; 199 } 200 201 void f0_trig_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 202 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 203 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 204 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 205 { 206 const PetscReal n = 1.0; 207 208 f0[0] = 4.0 - 3.0*x[0] - PetscSqr(n*PETSC_PI)*PetscSinReal(n*PETSC_PI*x[0]); 209 
f0[1] = -3.0*x[1] + PetscSqr(n*PETSC_PI)*PetscSinReal(n*PETSC_PI*x[1]); 210 } 211 212 /* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z} 213 u[Ncomp] = {p} */ 214 void f1_u(PetscInt dim, PetscInt Nf, PetscInt NfAux, 215 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 216 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 217 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[]) 218 { 219 const PetscInt Ncomp = dim; 220 PetscInt comp, d; 221 222 for (comp = 0; comp < Ncomp; ++comp) { 223 for (d = 0; d < dim; ++d) { 224 /* f1[comp*dim+d] = 0.5*(gradU[comp*dim+d] + gradU[d*dim+comp]); */ 225 f1[comp*dim+d] = u_x[comp*dim+d]; 226 } 227 f1[comp*dim+comp] -= u[Ncomp]; 228 } 229 } 230 231 /* gradU[comp*dim+d] = {u_x, u_y, v_x, v_y} or {u_x, u_y, u_z, v_x, v_y, v_z, w_x, w_y, w_z} */ 232 void f0_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, 233 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 234 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 235 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f0[]) 236 { 237 PetscInt d; 238 for (d = 0, f0[0] = 0.0; d < dim; ++d) f0[0] += u_x[d*dim+d]; 239 } 240 241 void f1_p(PetscInt dim, PetscInt Nf, PetscInt NfAux, 242 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 243 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 244 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f1[]) 245 { 246 PetscInt d; 247 for (d = 0; d < 
dim; ++d) f1[d] = 0.0; 248 } 249 250 /* < q, \nabla\cdot u > 251 NcompI = 1, NcompJ = dim */ 252 void g1_pu(PetscInt dim, PetscInt Nf, PetscInt NfAux, 253 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 254 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 255 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g1[]) 256 { 257 PetscInt d; 258 for (d = 0; d < dim; ++d) g1[d*dim+d] = 1.0; /* \frac{\partial\phi^{u_d}}{\partial x_d} */ 259 } 260 261 /* -< \nabla\cdot v, p > 262 NcompI = dim, NcompJ = 1 */ 263 void g2_up(PetscInt dim, PetscInt Nf, PetscInt NfAux, 264 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 265 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 266 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g2[]) 267 { 268 PetscInt d; 269 for (d = 0; d < dim; ++d) g2[d*dim+d] = -1.0; /* \frac{\partial\psi^{u_d}}{\partial x_d} */ 270 } 271 272 /* < \nabla v, \nabla u + {\nabla u}^T > 273 This just gives \nabla u, give the perdiagonal for the transpose */ 274 void g3_uu(PetscInt dim, PetscInt Nf, PetscInt NfAux, 275 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 276 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 277 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g3[]) 278 { 279 const PetscInt Ncomp = dim; 280 PetscInt compI, d; 281 282 for (compI = 0; compI < Ncomp; ++compI) { 283 for (d = 0; d < dim; ++d) { 284 
g3[((compI*Ncomp+compI)*dim+d)*dim+d] = 1.0; 285 } 286 } 287 } 288 289 /* 290 In 3D we use exact solution: 291 292 u = x^2 + y^2 293 v = y^2 + z^2 294 w = x^2 + y^2 - 2(x+y)z 295 p = x + y + z - 3/2 296 f_x = f_y = f_z = 3 297 298 so that 299 300 -\Delta u + \nabla p + f = <-4, -4, -4> + <1, 1, 1> + <3, 3, 3> = 0 301 \nabla \cdot u = 2x + 2y - 2(x + y) = 0 302 */ 303 PetscErrorCode quadratic_u_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *ctx) 304 { 305 u[0] = x[0]*x[0] + x[1]*x[1]; 306 u[1] = x[1]*x[1] + x[2]*x[2]; 307 u[2] = x[0]*x[0] + x[1]*x[1] - 2.0*(x[0] + x[1])*x[2]; 308 return 0; 309 } 310 311 PetscErrorCode linear_p_3d(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *p, void *ctx) 312 { 313 *p = x[0] + x[1] + x[2] - 1.5; 314 return 0; 315 } 316 317 void pressure(PetscInt dim, PetscInt Nf, PetscInt NfAux, 318 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 319 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 320 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar p[]) 321 { 322 p[0] = u[uOff[1]]; 323 } 324 325 PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options) 326 { 327 PetscInt bc, run, sol, n; 328 PetscErrorCode ierr; 329 330 PetscFunctionBeginUser; 331 options->debug = 0; 332 options->runType = RUN_FULL; 333 options->dim = 2; 334 options->interpolate = PETSC_FALSE; 335 options->simplex = PETSC_TRUE; 336 options->cells[0] = 3; 337 options->cells[1] = 3; 338 options->cells[2] = 3; 339 options->refinementLimit = 0.0; 340 options->testPartition = PETSC_FALSE; 341 options->bcType = DIRICHLET; 342 options->solType = SOL_QUADRATIC; 343 options->showInitial = PETSC_FALSE; 344 options->showError = PETSC_FALSE; 345 346 ierr = PetscOptionsBegin(comm, "", "Stokes Problem Options", 
"DMPLEX");CHKERRQ(ierr); 347 ierr = PetscOptionsInt("-debug", "The debugging level", "ex62.c", options->debug, &options->debug, NULL);CHKERRQ(ierr); 348 run = options->runType; 349 ierr = PetscOptionsEList("-run_type", "The run type", "ex62.c", runTypes, NUM_RUN_TYPES, runTypes[options->runType], &run, NULL);CHKERRQ(ierr); 350 options->runType = (RunType) run; 351 ierr = PetscOptionsInt("-dim", "The topological mesh dimension", "ex62.c", options->dim, &options->dim, NULL);CHKERRQ(ierr); 352 spatialDim = options->dim; 353 ierr = PetscOptionsBool("-interpolate", "Generate intermediate mesh elements", "ex62.c", options->interpolate, &options->interpolate, NULL);CHKERRQ(ierr); 354 ierr = PetscOptionsBool("-simplex", "Use simplices or tensor product cells", "ex62.c", options->simplex, &options->simplex, NULL);CHKERRQ(ierr); 355 if (options->simplex) { 356 options->cells[0] = 4 - options->dim; 357 options->cells[1] = 4 - options->dim; 358 options->cells[2] = 4 - options->dim; 359 } 360 n = 3; 361 ierr = PetscOptionsIntArray("-cells", "The initial mesh division", "ex62.c", options->cells, &n, NULL);CHKERRQ(ierr); 362 ierr = PetscOptionsReal("-refinement_limit", "The largest allowable cell volume", "ex62.c", options->refinementLimit, &options->refinementLimit, NULL);CHKERRQ(ierr); 363 ierr = PetscOptionsBool("-test_partition", "Use a fixed partition for testing", "ex62.c", options->testPartition, &options->testPartition, NULL);CHKERRQ(ierr); 364 bc = options->bcType; 365 ierr = PetscOptionsEList("-bc_type","Type of boundary condition","ex62.c", bcTypes, NUM_BC_TYPES, bcTypes[options->bcType], &bc, NULL);CHKERRQ(ierr); 366 options->bcType = (BCType) bc; 367 sol = options->solType; 368 ierr = PetscOptionsEList("-sol_type", "The solution type", "ex62.c", solTypes, NUM_SOL_TYPES, solTypes[options->solType], &sol, NULL);CHKERRQ(ierr); 369 options->solType = (SolType) sol; 370 ierr = PetscOptionsBool("-show_initial", "Output the initial guess for verification", "ex62.c", 
options->showInitial, &options->showInitial, NULL);CHKERRQ(ierr); 371 ierr = PetscOptionsBool("-show_error", "Output the error for verification", "ex62.c", options->showError, &options->showError, NULL);CHKERRQ(ierr); 372 ierr = PetscOptionsEnd(); 373 374 ierr = PetscLogEventRegister("CreateMesh", DM_CLASSID, &options->createMeshEvent);CHKERRQ(ierr); 375 PetscFunctionReturn(0); 376 } 377 378 PetscErrorCode DMVecViewLocal(DM dm, Vec v) 379 { 380 Vec lv; 381 PetscErrorCode ierr; 382 383 PetscFunctionBeginUser; 384 ierr = DMGetLocalVector(dm, &lv);CHKERRQ(ierr); 385 ierr = DMGlobalToLocalBegin(dm, v, INSERT_VALUES, lv);CHKERRQ(ierr); 386 ierr = DMGlobalToLocalEnd(dm, v, INSERT_VALUES, lv);CHKERRQ(ierr); 387 ierr = DMPrintLocalVec(dm, "Local function", 0.0, lv);CHKERRQ(ierr); 388 ierr = DMRestoreLocalVector(dm, &lv);CHKERRQ(ierr); 389 PetscFunctionReturn(0); 390 } 391 392 PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm) 393 { 394 PetscInt dim = user->dim; 395 PetscBool interpolate = user->interpolate; 396 PetscReal refinementLimit = user->refinementLimit; 397 PetscErrorCode ierr; 398 399 PetscFunctionBeginUser; 400 ierr = PetscLogEventBegin(user->createMeshEvent,0,0,0,0);CHKERRQ(ierr); 401 ierr = DMPlexCreateBoxMesh(comm, dim, user->simplex, user->cells, NULL, NULL, NULL, interpolate, dm);CHKERRQ(ierr); 402 { 403 DM refinedMesh = NULL; 404 DM distributedMesh = NULL; 405 406 /* Refine mesh using a volume constraint */ 407 ierr = DMPlexSetRefinementLimit(*dm, refinementLimit);CHKERRQ(ierr); 408 if (user->simplex) {ierr = DMRefine(*dm, comm, &refinedMesh);CHKERRQ(ierr);} 409 if (refinedMesh) { 410 ierr = DMDestroy(dm);CHKERRQ(ierr); 411 *dm = refinedMesh; 412 } 413 /* Setup test partitioning */ 414 if (user->testPartition) { 415 PetscInt triSizes_n2[2] = {4, 4}; 416 PetscInt triPoints_n2[8] = {3, 5, 6, 7, 0, 1, 2, 4}; 417 PetscInt triSizes_n3[3] = {2, 3, 3}; 418 PetscInt triPoints_n3[8] = {3, 5, 1, 6, 7, 0, 2, 4}; 419 PetscInt triSizes_n5[5] = {1, 2, 2, 1, 
2}; 420 PetscInt triPoints_n5[8] = {3, 5, 6, 4, 7, 0, 1, 2}; 421 PetscInt triSizes_ref_n2[2] = {8, 8}; 422 PetscInt triPoints_ref_n2[16] = {1, 5, 6, 7, 10, 11, 14, 15, 0, 2, 3, 4, 8, 9, 12, 13}; 423 PetscInt triSizes_ref_n3[3] = {5, 6, 5}; 424 PetscInt triPoints_ref_n3[16] = {1, 7, 10, 14, 15, 2, 6, 8, 11, 12, 13, 0, 3, 4, 5, 9}; 425 PetscInt triSizes_ref_n5[5] = {3, 4, 3, 3, 3}; 426 PetscInt triPoints_ref_n5[16] = {1, 7, 10, 2, 11, 13, 14, 5, 6, 15, 0, 8, 9, 3, 4, 12}; 427 PetscInt triSizes_ref_n5_d3[5] = {1, 1, 1, 1, 2}; 428 PetscInt triPoints_ref_n5_d3[6] = {0, 1, 2, 3, 4, 5}; 429 const PetscInt *sizes = NULL; 430 const PetscInt *points = NULL; 431 PetscPartitioner part; 432 PetscInt cEnd; 433 PetscMPIInt rank, size; 434 435 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 436 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 437 ierr = DMPlexGetHeightStratum(*dm, 0, NULL, &cEnd);CHKERRQ(ierr); 438 if (!rank) { 439 if (dim == 2 && user->simplex && size == 2 && cEnd == 8) { 440 sizes = triSizes_n2; points = triPoints_n2; 441 } else if (dim == 2 && user->simplex && size == 3 && cEnd == 8) { 442 sizes = triSizes_n3; points = triPoints_n3; 443 } else if (dim == 2 && user->simplex && size == 5 && cEnd == 8) { 444 sizes = triSizes_n5; points = triPoints_n5; 445 } else if (dim == 2 && user->simplex && size == 2 && cEnd == 16) { 446 sizes = triSizes_ref_n2; points = triPoints_ref_n2; 447 } else if (dim == 2 && user->simplex && size == 3 && cEnd == 16) { 448 sizes = triSizes_ref_n3; points = triPoints_ref_n3; 449 } else if (dim == 2 && user->simplex && size == 5 && cEnd == 16) { 450 sizes = triSizes_ref_n5; points = triPoints_ref_n5; 451 } else if (dim == 3 && user->simplex && size == 5 && cEnd == 6) { 452 sizes = triSizes_ref_n5_d3; points = triPoints_ref_n5_d3; 453 } else SETERRQ(comm, PETSC_ERR_ARG_WRONG, "No stored partition matching run parameters"); 454 } 455 ierr = DMPlexGetPartitioner(*dm, &part);CHKERRQ(ierr); 456 ierr = PetscPartitionerSetType(part, 
PETSCPARTITIONERSHELL);CHKERRQ(ierr); 457 ierr = PetscPartitionerShellSetPartition(part, size, sizes, points);CHKERRQ(ierr); 458 } else { 459 PetscPartitioner part; 460 461 ierr = DMPlexGetPartitioner(*dm, &part);CHKERRQ(ierr); 462 ierr = PetscPartitionerSetFromOptions(part);CHKERRQ(ierr); 463 } 464 /* Distribute mesh over processes */ 465 ierr = DMPlexDistribute(*dm, 0, NULL, &distributedMesh);CHKERRQ(ierr); 466 if (distributedMesh) { 467 ierr = DMDestroy(dm);CHKERRQ(ierr); 468 *dm = distributedMesh; 469 } 470 } 471 ierr = DMSetFromOptions(*dm);CHKERRQ(ierr); 472 ierr = DMViewFromOptions(*dm, NULL, "-dm_view");CHKERRQ(ierr); 473 ierr = PetscLogEventEnd(user->createMeshEvent,0,0,0,0);CHKERRQ(ierr); 474 PetscFunctionReturn(0); 475 } 476 477 PetscErrorCode SetupProblem(DM dm, AppCtx *user) 478 { 479 PetscDS prob; 480 const PetscInt id = 1; 481 PetscErrorCode ierr; 482 483 PetscFunctionBeginUser; 484 ierr = DMGetDS(dm, &prob);CHKERRQ(ierr); 485 switch (user->solType) { 486 case SOL_QUADRATIC: 487 switch (user->dim) { 488 case 2: 489 ierr = PetscDSSetResidual(prob, 0, f0_quadratic_u, f1_u);CHKERRQ(ierr); 490 user->exactFuncs[0] = quadratic_u_2d; 491 user->exactFuncs[1] = linear_p_2d; 492 break; 493 case 3: 494 ierr = PetscDSSetResidual(prob, 0, f0_quadratic_u, f1_u);CHKERRQ(ierr); 495 user->exactFuncs[0] = quadratic_u_3d; 496 user->exactFuncs[1] = linear_p_3d; 497 break; 498 default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %d for quadratic solution", user->dim); 499 } 500 break; 501 case SOL_CUBIC: 502 switch (user->dim) { 503 case 2: 504 ierr = PetscDSSetResidual(prob, 0, f0_cubic_u, f1_u);CHKERRQ(ierr); 505 user->exactFuncs[0] = cubic_u_2d; 506 user->exactFuncs[1] = quadratic_p_2d; 507 break; 508 default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %d for quadratic solution", user->dim); 509 } 510 break; 511 case SOL_TRIG: 512 switch (user->dim) { 513 case 2: 514 ierr = PetscDSSetResidual(prob, 0, 
f0_trig_u, f1_u);CHKERRQ(ierr); 515 user->exactFuncs[0] = trig_u_2d; 516 user->exactFuncs[1] = quadratic_p_2d; 517 break; 518 default: SETERRQ1(PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Unsupported dimension %d for trigonometric solution", user->dim); 519 } 520 break; 521 default: SETERRQ2(PetscObjectComm((PetscObject) prob), PETSC_ERR_ARG_WRONG, "Unsupported solution type: %s (%D)", solTypes[PetscMin(user->solType, NUM_SOL_TYPES)], user->solType); 522 } 523 ierr = PetscDSSetResidual(prob, 1, f0_p, f1_p);CHKERRQ(ierr); 524 ierr = PetscDSSetJacobian(prob, 0, 0, NULL, NULL, NULL, g3_uu);CHKERRQ(ierr); 525 ierr = PetscDSSetJacobian(prob, 0, 1, NULL, NULL, g2_up, NULL);CHKERRQ(ierr); 526 ierr = PetscDSSetJacobian(prob, 1, 0, NULL, g1_pu, NULL, NULL);CHKERRQ(ierr); 527 528 ierr = PetscDSAddBoundary(prob, user->bcType == DIRICHLET ? DM_BC_ESSENTIAL : DM_BC_NATURAL, "wall", user->bcType == NEUMANN ? "boundary" : "marker", 0, 0, NULL, (void (*)(void)) user->exactFuncs[0], 1, &id, user);CHKERRQ(ierr); 529 ierr = PetscDSSetExactSolution(prob, 0, user->exactFuncs[0], user);CHKERRQ(ierr); 530 ierr = PetscDSSetExactSolution(prob, 1, user->exactFuncs[1], user);CHKERRQ(ierr); 531 PetscFunctionReturn(0); 532 } 533 534 PetscErrorCode SetupDiscretization(DM dm, AppCtx *user) 535 { 536 DM cdm = dm; 537 const PetscInt dim = user->dim; 538 PetscFE fe[2]; 539 MPI_Comm comm; 540 PetscErrorCode ierr; 541 542 PetscFunctionBeginUser; 543 /* Create finite element */ 544 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 545 ierr = PetscFECreateDefault(comm, dim, dim, user->simplex, "vel_", PETSC_DEFAULT, &fe[0]);CHKERRQ(ierr); 546 ierr = PetscObjectSetName((PetscObject) fe[0], "velocity");CHKERRQ(ierr); 547 ierr = PetscFECreateDefault(comm, dim, 1, user->simplex, "pres_", PETSC_DEFAULT, &fe[1]);CHKERRQ(ierr); 548 ierr = PetscFECopyQuadrature(fe[0], fe[1]);CHKERRQ(ierr); 549 ierr = PetscObjectSetName((PetscObject) fe[1], "pressure");CHKERRQ(ierr); 550 /* Set discretization 
and boundary conditions for each mesh */ 551 ierr = DMSetField(dm, 0, NULL, (PetscObject) fe[0]);CHKERRQ(ierr); 552 ierr = DMSetField(dm, 1, NULL, (PetscObject) fe[1]);CHKERRQ(ierr); 553 ierr = DMCreateDS(dm);CHKERRQ(ierr); 554 ierr = SetupProblem(dm, user);CHKERRQ(ierr); 555 while (cdm) { 556 ierr = DMCopyDisc(dm, cdm);CHKERRQ(ierr); 557 ierr = DMGetCoarseDM(cdm, &cdm);CHKERRQ(ierr); 558 } 559 ierr = PetscFEDestroy(&fe[0]);CHKERRQ(ierr); 560 ierr = PetscFEDestroy(&fe[1]);CHKERRQ(ierr); 561 PetscFunctionReturn(0); 562 } 563 564 static PetscErrorCode CreatePressureNullSpace(DM dm, PetscInt dummy, MatNullSpace *nullspace) 565 { 566 Vec vec; 567 PetscErrorCode (*funcs[2])(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void* ctx) = {zero_vector, constant_p}; 568 PetscErrorCode ierr; 569 570 PetscFunctionBeginUser; 571 ierr = DMCreateGlobalVector(dm, &vec);CHKERRQ(ierr); 572 ierr = DMProjectFunction(dm, 0.0, funcs, NULL, INSERT_ALL_VALUES, vec);CHKERRQ(ierr); 573 ierr = VecNormalize(vec, NULL);CHKERRQ(ierr); 574 ierr = PetscObjectSetName((PetscObject) vec, "Pressure Null Space");CHKERRQ(ierr); 575 ierr = VecViewFromOptions(vec, NULL, "-pressure_nullspace_view");CHKERRQ(ierr); 576 ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)dm), PETSC_FALSE, 1, &vec, nullspace);CHKERRQ(ierr); 577 ierr = VecDestroy(&vec);CHKERRQ(ierr); 578 /* New style for field null spaces */ 579 { 580 PetscObject pressure; 581 MatNullSpace nullspacePres; 582 583 ierr = DMGetField(dm, 1, NULL, &pressure);CHKERRQ(ierr); 584 ierr = MatNullSpaceCreate(PetscObjectComm(pressure), PETSC_TRUE, 0, NULL, &nullspacePres);CHKERRQ(ierr); 585 ierr = PetscObjectCompose(pressure, "nullspace", (PetscObject) nullspacePres);CHKERRQ(ierr); 586 ierr = MatNullSpaceDestroy(&nullspacePres);CHKERRQ(ierr); 587 } 588 PetscFunctionReturn(0); 589 } 590 591 /* Add a vector in the nullspace to make the continuum integral 0. 
592 593 If int(u) = a and int(n) = b, then int(u - a/b n) = a - a/b b = 0 594 */ 595 static PetscErrorCode CorrectDiscretePressure(DM dm, MatNullSpace nullspace, Vec u, AppCtx *user) 596 { 597 PetscDS prob; 598 const Vec *nullvecs; 599 PetscScalar pintd, intc[2], intn[2]; 600 MPI_Comm comm; 601 PetscErrorCode ierr; 602 603 PetscFunctionBeginUser; 604 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 605 ierr = DMGetDS(dm, &prob);CHKERRQ(ierr); 606 ierr = PetscDSSetObjective(prob, 1, pressure);CHKERRQ(ierr); 607 ierr = MatNullSpaceGetVecs(nullspace, NULL, NULL, &nullvecs);CHKERRQ(ierr); 608 ierr = VecDot(nullvecs[0], u, &pintd);CHKERRQ(ierr); 609 if (PetscAbsScalar(pintd) > 1.0e-10) SETERRQ1(comm, PETSC_ERR_ARG_WRONG, "Discrete integral of pressure: %g\n", (double) PetscRealPart(pintd)); 610 ierr = DMPlexComputeIntegralFEM(dm, nullvecs[0], intn, user);CHKERRQ(ierr); 611 ierr = DMPlexComputeIntegralFEM(dm, u, intc, user);CHKERRQ(ierr); 612 ierr = VecAXPY(u, -intc[1]/intn[1], nullvecs[0]);CHKERRQ(ierr); 613 ierr = DMPlexComputeIntegralFEM(dm, u, intc, user);CHKERRQ(ierr); 614 if (PetscAbsScalar(intc[1]) > 1.0e-10) SETERRQ1(comm, PETSC_ERR_ARG_WRONG, "Continuum integral of pressure after correction: %g\n", (double) PetscRealPart(intc[1])); 615 PetscFunctionReturn(0); 616 } 617 618 static PetscErrorCode SNESConvergenceCorrectPressure(SNES snes, PetscInt it, PetscReal xnorm, PetscReal gnorm, PetscReal f, SNESConvergedReason *reason, void *user) 619 { 620 PetscErrorCode ierr; 621 622 PetscFunctionBeginUser; 623 ierr = SNESConvergedDefault(snes, it, xnorm, gnorm, f, reason, user);CHKERRQ(ierr); 624 if (*reason > 0) { 625 DM dm; 626 Mat J; 627 Vec u; 628 MatNullSpace nullspace; 629 630 ierr = SNESGetDM(snes, &dm);CHKERRQ(ierr); 631 ierr = SNESGetSolution(snes, &u);CHKERRQ(ierr); 632 ierr = SNESGetJacobian(snes, &J, NULL, NULL, NULL);CHKERRQ(ierr); 633 ierr = MatGetNullSpace(J, &nullspace);CHKERRQ(ierr); 634 ierr = CorrectDiscretePressure(dm, nullspace, u, 
(AppCtx *) user);CHKERRQ(ierr); 635 } 636 PetscFunctionReturn(0); 637 } 638 639 int main(int argc, char **argv) 640 { 641 SNES snes; /* nonlinear solver */ 642 DM dm; /* problem definition */ 643 Vec u, r; /* solution and residual */ 644 AppCtx user; /* user-defined work context */ 645 PetscReal error = 0.0; /* L_2 error in the solution */ 646 PetscReal ferrors[2]; 647 PetscErrorCode ierr; 648 649 ierr = PetscInitialize(&argc, &argv, NULL,help);if (ierr) return ierr; 650 ierr = ProcessOptions(PETSC_COMM_WORLD, &user);CHKERRQ(ierr); 651 ierr = SNESCreate(PETSC_COMM_WORLD, &snes);CHKERRQ(ierr); 652 ierr = CreateMesh(PETSC_COMM_WORLD, &user, &dm);CHKERRQ(ierr); 653 ierr = SNESSetDM(snes, dm);CHKERRQ(ierr); 654 ierr = DMSetApplicationContext(dm, &user);CHKERRQ(ierr); 655 656 ierr = PetscMalloc(2 * sizeof(void (*)(const PetscReal[], PetscScalar *, void *)), &user.exactFuncs);CHKERRQ(ierr); 657 ierr = SetupDiscretization(dm, &user);CHKERRQ(ierr); 658 ierr = DMPlexCreateClosureIndex(dm, NULL);CHKERRQ(ierr); 659 660 ierr = DMCreateGlobalVector(dm, &u);CHKERRQ(ierr); 661 ierr = VecDuplicate(u, &r);CHKERRQ(ierr); 662 663 ierr = DMSetNullSpaceConstructor(dm, 2, CreatePressureNullSpace);CHKERRQ(ierr); 664 ierr = SNESSetConvergenceTest(snes, SNESConvergenceCorrectPressure, &user, NULL);CHKERRQ(ierr); 665 666 ierr = DMPlexSetSNESLocalFEM(dm,&user,&user,&user);CHKERRQ(ierr); 667 668 ierr = SNESSetFromOptions(snes);CHKERRQ(ierr); 669 670 ierr = DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, u);CHKERRQ(ierr); 671 ierr = PetscObjectSetName((PetscObject) u, "Exact Solution");CHKERRQ(ierr); 672 ierr = VecViewFromOptions(u, NULL, "-exact_vec_view");CHKERRQ(ierr); 673 ierr = PetscObjectSetName((PetscObject) u, "Solution");CHKERRQ(ierr); 674 if (user.showInitial) {ierr = DMVecViewLocal(dm, u);CHKERRQ(ierr);} 675 ierr = PetscObjectSetName((PetscObject) u, "Solution");CHKERRQ(ierr); 676 if (user.runType == RUN_FULL) { 677 PetscErrorCode (*initialGuess[2])(PetscInt 
dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void* ctx) = {zero_vector, zero_scalar}; 678 679 ierr = DMProjectFunction(dm, 0.0, initialGuess, NULL, INSERT_VALUES, u);CHKERRQ(ierr); 680 if (user.debug) { 681 ierr = PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");CHKERRQ(ierr); 682 ierr = VecView(u, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 683 } 684 ierr = SNESSolve(snes, NULL, u);CHKERRQ(ierr); 685 ierr = DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);CHKERRQ(ierr); 686 ierr = DMComputeL2FieldDiff(dm, 0.0, user.exactFuncs, NULL, u, ferrors);CHKERRQ(ierr); 687 ierr = PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g [%g, %g]\n", (double)error, (double)ferrors[0], (double)ferrors[1]);CHKERRQ(ierr); 688 if (user.showError) { 689 Vec r; 690 691 ierr = DMGetGlobalVector(dm, &r);CHKERRQ(ierr); 692 ierr = DMProjectFunction(dm, 0.0, user.exactFuncs, NULL, INSERT_ALL_VALUES, r);CHKERRQ(ierr); 693 ierr = VecAXPY(r, -1.0, u);CHKERRQ(ierr); 694 ierr = PetscObjectSetName((PetscObject) r, "Solution Error");CHKERRQ(ierr); 695 ierr = VecViewFromOptions(r, NULL, "-error_vec_view");CHKERRQ(ierr); 696 ierr = DMRestoreGlobalVector(dm, &r);CHKERRQ(ierr); 697 } 698 } else { 699 PetscReal res = 0.0; 700 701 /* Check discretization error */ 702 ierr = PetscPrintf(PETSC_COMM_WORLD, "Initial guess\n");CHKERRQ(ierr); 703 ierr = VecView(u, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 704 ierr = DMComputeL2Diff(dm, 0.0, user.exactFuncs, NULL, u, &error);CHKERRQ(ierr); 705 if (error >= 1.0e-11) {ierr = PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: %g\n", (double)error);CHKERRQ(ierr);} 706 else {ierr = PetscPrintf(PETSC_COMM_WORLD, "L_2 Error: < 1.0e-11\n");CHKERRQ(ierr);} 707 /* Check residual */ 708 ierr = SNESComputeFunction(snes, u, r);CHKERRQ(ierr); 709 ierr = PetscPrintf(PETSC_COMM_WORLD, "Initial Residual\n");CHKERRQ(ierr); 710 ierr = VecChop(r, 1.0e-10);CHKERRQ(ierr); 711 ierr = VecView(r, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 712 ierr = VecNorm(r, NORM_2, 
&res);CHKERRQ(ierr); 713 ierr = PetscPrintf(PETSC_COMM_WORLD, "L_2 Residual: %g\n", (double)res);CHKERRQ(ierr); 714 /* Check Jacobian */ 715 { 716 Mat J, M; 717 MatNullSpace nullspace; 718 Vec b; 719 PetscBool isNull; 720 721 ierr = SNESSetUpMatrices(snes);CHKERRQ(ierr); 722 ierr = SNESGetJacobian(snes, &J, &M, NULL, NULL);CHKERRQ(ierr); 723 ierr = SNESComputeJacobian(snes, u, J, M);CHKERRQ(ierr); 724 ierr = MatGetNullSpace(J, &nullspace);CHKERRQ(ierr); 725 ierr = MatNullSpaceTest(nullspace, J, &isNull);CHKERRQ(ierr); 726 ierr = VecDuplicate(u, &b);CHKERRQ(ierr); 727 ierr = VecSet(r, 0.0);CHKERRQ(ierr); 728 ierr = SNESComputeFunction(snes, r, b);CHKERRQ(ierr); 729 ierr = MatMult(J, u, r);CHKERRQ(ierr); 730 ierr = VecAXPY(r, 1.0, b);CHKERRQ(ierr); 731 ierr = VecDestroy(&b);CHKERRQ(ierr); 732 ierr = PetscPrintf(PETSC_COMM_WORLD, "Au - b = Au + F(0)\n");CHKERRQ(ierr); 733 ierr = VecChop(r, 1.0e-10);CHKERRQ(ierr); 734 ierr = VecView(r, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 735 ierr = VecNorm(r, NORM_2, &res);CHKERRQ(ierr); 736 ierr = PetscPrintf(PETSC_COMM_WORLD, "Linear L_2 Residual: %g\n", (double)res);CHKERRQ(ierr); 737 } 738 } 739 ierr = VecViewFromOptions(u, NULL, "-sol_vec_view");CHKERRQ(ierr); 740 741 ierr = VecDestroy(&u);CHKERRQ(ierr); 742 ierr = VecDestroy(&r);CHKERRQ(ierr); 743 ierr = SNESDestroy(&snes);CHKERRQ(ierr); 744 ierr = DMDestroy(&dm);CHKERRQ(ierr); 745 ierr = PetscFree(user.exactFuncs);CHKERRQ(ierr); 746 ierr = PetscFinalize(); 747 return ierr; 748 } 749 750 /*TEST 751 752 # 2D serial P1 tests 0-3 753 test: 754 suffix: 0 755 requires: triangle 756 args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 757 test: 758 suffix: 1 759 requires: triangle 760 args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 761 test: 762 suffix: 2 763 
requires: triangle 764 args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 765 test: 766 suffix: 3 767 requires: triangle 768 args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 769 # 2D serial P2 tests 4-5 770 test: 771 suffix: 4 772 requires: triangle 773 args: -run_type test -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 774 test: 775 suffix: 5 776 requires: triangle 777 args: -run_type test -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 778 # 2D serial P3 tests 779 test: 780 suffix: 2d_p3_0 781 requires: triangle 782 args: -run_type test -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 783 test: 784 suffix: 2d_p3_1 785 requires: triangle !single 786 args: -run_type full -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 3 -pres_petscspace_degree 2 787 # Parallel tests 6-17 788 test: 789 suffix: 6 790 requires: triangle 791 nsize: 2 792 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 793 test: 794 suffix: 7 795 requires: triangle 796 nsize: 3 797 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 798 test: 799 suffix: 8 800 requires: triangle 801 nsize: 5 802 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 803 test: 804 suffix: 9 805 requires: triangle 806 nsize: 2 
807 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 808 test: 809 suffix: 10 810 requires: triangle 811 nsize: 3 812 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 813 test: 814 suffix: 11 815 requires: triangle 816 nsize: 5 817 args: -run_type test -refinement_limit 0.0 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 818 test: 819 suffix: 12 820 requires: triangle 821 nsize: 2 822 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 823 test: 824 suffix: 13 825 requires: triangle 826 nsize: 3 827 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 828 test: 829 suffix: 14 830 requires: triangle 831 nsize: 5 832 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 833 test: 834 suffix: 15 835 requires: triangle 836 nsize: 2 837 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 838 test: 839 suffix: 16 840 requires: triangle 841 nsize: 3 842 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -dm_plex_print_fem 1 843 test: 844 suffix: 17 845 requires: triangle 846 nsize: 5 847 args: -run_type test -refinement_limit 0.0625 -test_partition -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 
-pres_petscspace_degree 1 -dm_plex_print_fem 1 848 # 3D serial P1 tests 43-46 849 test: 850 suffix: 43 851 requires: ctetgen 852 args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 853 test: 854 suffix: 44 855 requires: ctetgen 856 args: -run_type test -dim 3 -refinement_limit 0.0 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 857 test: 858 suffix: 45 859 requires: ctetgen 860 args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 861 test: 862 suffix: 46 863 requires: ctetgen 864 args: -run_type test -dim 3 -refinement_limit 0.0125 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -show_initial -dm_plex_print_fem 1 865 # Full solutions 18-29 866 test: 867 suffix: 18 868 requires: triangle !single 869 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 870 args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 871 test: 872 suffix: 19 873 requires: triangle !single 874 nsize: 2 875 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 876 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 877 test: 878 suffix: 20 879 requires: triangle !single 880 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver 
iterations=12/g" 881 nsize: 3 882 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 883 test: 884 suffix: 20_parmetis 885 requires: parmetis triangle !single 886 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 887 nsize: 3 888 args: -run_type full -petscpartitioner_type parmetis -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 889 test: 890 suffix: 21 891 requires: triangle !single 892 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 893 nsize: 5 894 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 0 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 895 test: 896 suffix: 22 897 requires: triangle !single 898 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 899 args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 900 test: 901 suffix: 23 902 requires: triangle !single 903 nsize: 2 904 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 905 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 
-snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 906 test: 907 suffix: 24 908 requires: triangle !single 909 nsize: 3 910 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 911 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 912 test: 913 suffix: 25 914 requires: triangle !single 915 filter: sed -e "s/total number of linear solver iterations=11/total number of linear solver iterations=12/g" 916 nsize: 5 917 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 918 test: 919 suffix: 26 920 requires: triangle !single 921 args: -run_type full -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 922 test: 923 suffix: 27 924 requires: triangle !single 925 nsize: 2 926 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 927 test: 928 suffix: 28 929 requires: triangle !single 930 nsize: 3 931 args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 932 test: 933 suffix: 29 934 requires: triangle !single 935 nsize: 5 936 
args: -run_type full -petscpartitioner_type simple -refinement_limit 0.0625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 937 # Full solutions with quads 938 # FULL Schur with LU/Jacobi 939 test: 940 suffix: quad_q2q1_full 941 requires: !single 942 args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 943 test: 944 suffix: quad_q2p1_full 945 requires: !single 946 args: -run_type full -simplex 0 -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pres_petscspace_poly_tensor 0 -pres_petscdualspace_lagrange_continuity 0 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 947 # Stokes preconditioners 30-36 948 # Jacobi 949 test: 950 suffix: 30 951 requires: triangle !single 952 filter: sed -e "s/total number of linear solver iterations=756/total number of linear solver iterations=757/g" -e "s/total number of linear solver iterations=758/total number of linear solver iterations=757/g" 953 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_gmres_restart 100 
-pc_type jacobi -ksp_rtol 1.0e-9 -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 954 # Block diagonal \begin{pmatrix} A & 0 \\ 0 & I \end{pmatrix} 955 test: 956 suffix: 31 957 requires: triangle !single 958 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-4 -pc_type fieldsplit -pc_fieldsplit_type additive -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 959 # Block triangular \begin{pmatrix} A & B \\ 0 & I \end{pmatrix} 960 test: 961 suffix: 32 962 requires: triangle !single 963 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type multiplicative -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 964 # Diagonal Schur complement \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} 965 test: 966 suffix: 33 967 requires: triangle !single 968 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type diag -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 969 # Upper triangular Schur complement \begin{pmatrix} A & B \\ 0 & S \end{pmatrix} 970 test: 971 suffix: 34 972 requires: triangle !single 973 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 
-pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type upper -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 974 # Lower triangular Schur complement \begin{pmatrix} A & 0 \\ B^T & S \end{pmatrix} 975 test: 976 suffix: 35 977 requires: triangle !single 978 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type lower -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 979 # Full Schur complement \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & S \end{pmatrix} \begin{pmatrix} I & A^{-1} B \\ 0 & I \end{pmatrix} 980 test: 981 suffix: 36 982 requires: triangle !single 983 args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 984 # SIMPLE \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T diag(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & diag(A)^{-1} B \\ 0 & I \end{pmatrix} 985 test: 986 suffix: pc_simple 987 requires: triangle !single 988
args: -run_type full -refinement_limit 0.00625 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -snes_error_if_not_converged -ksp_error_if_not_converged -snes_view 989 # SIMPLEC \begin{pmatrix} I & 0 \\ B^T A^{-1} & I \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & B^T rowsum(A)^{-1} B \end{pmatrix} \begin{pmatrix} I & rowsum(A)^{-1} B \\ 0 & I \end{pmatrix} 990 test: 991 suffix: pc_simplec 992 requires: triangle 993 args: -run_type full -dm_refine 3 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -ksp_type fgmres -ksp_max_it 5 -ksp_gmres_restart 100 -ksp_rtol 1.0e-9 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_ksp_max_it 10 -fieldsplit_velocity_ksp_type gmres -fieldsplit_velocity_pc_type lu -fieldsplit_pressure_pc_type jacobi -fieldsplit_pressure_inner_ksp_type preonly -fieldsplit_pressure_inner_pc_type jacobi -fieldsplit_pressure_inner_pc_jacobi_type rowsum -fieldsplit_pressure_upper_ksp_type preonly -fieldsplit_pressure_upper_pc_type jacobi -fieldsplit_pressure_upper_pc_jacobi_type rowsum -snes_converged_reason -ksp_converged_reason -snes_view 994 # FETI-DP solvers (these solvers are quite inefficient, they are here to exercise the code) 995 test: 996 suffix: fetidp_2d_tri 997 requires: triangle mumps 998 filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=41/linear solver iterations=42/g" 999 nsize: 5 1000 args: -run_type 
full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly 1001 test: 1002 suffix: fetidp_3d_tet_smumps 1003 output_file: output/ex62_fetidp_3d_tet.out 1004 requires: ctetgen suitesparse mumps 1005 filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=10[0-9]/linear solver iterations=100/g" | sed -e "s/linear solver iterations=9[0-9]/linear solver iterations=100/g" 1006 nsize: 5 1007 args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat -dim 3 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -test_partition -fetidp_bddc_sub_schurs_mat_solver_type mumps 1008 test: 1009 suffix: fetidp_3d_tet_spetsc 1010 requires: ctetgen suitesparse 1011 filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=10[0-9]/linear solver iterations=100/g" | sed -e 
"s/linear solver iterations=9[0-9]/linear solver iterations=100/g" 1012 nsize: 5 1013 args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 1000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_use_deluxe_scaling -fetidp_bddc_pc_bddc_benign_trick -fetidp_bddc_pc_bddc_deluxe_singlemat -dim 3 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -fetidp_bddelta_pc_factor_mat_solver_type umfpack -fetidp_fieldsplit_lag_ksp_type preonly -test_partition -fetidp_bddc_sub_schurs_mat_solver_type petsc -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack 1014 test: 1015 suffix: fetidp_2d_quad 1016 requires: mumps double 1017 filter: grep -v "variant HERMITIAN" 1018 nsize: 5 1019 args: -run_type full -dm_refine 2 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 200 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type mumps -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type mumps -simplex 0 -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly 1020 test: 1021 suffix: fetidp_3d_hex 1022 requires: suitesparse 1023 filter: grep -v "variant HERMITIAN" | sed -e "s/linear solver iterations=7[0-9]/linear solver iterations=71/g" 1024 
nsize: 5 1025 args: -run_type full -dm_refine 1 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type fetidp -ksp_rtol 1.0e-8 -ksp_fetidp_saddlepoint -fetidp_ksp_type cg -fetidp_fieldsplit_p_ksp_max_it 1 -fetidp_fieldsplit_p_ksp_type richardson -fetidp_fieldsplit_p_ksp_richardson_scale 2000 -fetidp_fieldsplit_p_pc_type none -ksp_fetidp_saddlepoint_flip 1 -dim 3 -simplex 0 -fetidp_pc_discrete_harmonic -fetidp_harmonic_pc_factor_mat_solver_type petsc -fetidp_harmonic_pc_type cholesky -petscpartitioner_type simple -fetidp_fieldsplit_lag_ksp_type preonly -fetidp_bddc_pc_bddc_dirichlet_pc_factor_mat_solver_type umfpack -fetidp_bddc_pc_bddc_neumann_pc_factor_mat_solver_type umfpack 1026 # Convergence 1027 test: 1028 suffix: 2d_quad_q1_p0_conv 1029 requires: !single 1030 args: -run_type full -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 -vel_petscspace_degree 1 -pres_petscspace_degree 0 \ 1031 -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \ 1032 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 1033 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \ 1034 -fieldsplit_velocity_pc_type lu \ 1035 -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 1036 test: 1037 suffix: 2d_tri_p2_p1_conv 1038 requires: triangle !single 1039 args: -run_type full -sol_type cubic -bc_type dirichlet -interpolate 1 -dm_refine 0 \ 1040 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \ 1041 -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \ 1042 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 1043 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \ 1044 -fieldsplit_velocity_pc_type lu \ 1045 -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type 
jacobi 1046 test: 1047 suffix: 2d_quad_q2_q1_conv 1048 requires: !single 1049 args: -run_type full -sol_type cubic -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 \ 1050 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \ 1051 -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \ 1052 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 1053 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \ 1054 -fieldsplit_velocity_pc_type lu \ 1055 -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 1056 test: 1057 suffix: 2d_quad_q2_p1_conv 1058 requires: !single 1059 args: -run_type full -sol_type cubic -bc_type dirichlet -simplex 0 -interpolate 1 -dm_refine 0 \ 1060 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -pres_petscspace_poly_tensor 0 -pres_petscdualspace_lagrange_continuity 0 \ 1061 -snes_convergence_estimate -convest_num_refine 3 -snes_error_if_not_converged \ 1062 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 1063 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \ 1064 -fieldsplit_velocity_pc_type lu \ 1065 -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 1066 # GMG solver 1067 test: 1068 suffix: 2d_tri_p2_p1_gmg_vcycle 1069 requires: triangle 1070 args: -run_type full -sol_type cubic -bc_type dirichlet -interpolate 1 -cells 2,2 -dm_refine_hierarchy 1 \ 1071 -vel_petscspace_degree 2 -pres_petscspace_degree 1 \ 1072 -snes_convergence_estimate -convest_num_refine 1 -snes_error_if_not_converged \ 1073 -ksp_type fgmres -ksp_gmres_restart 10 -ksp_rtol 1.0e-9 -ksp_error_if_not_converged \ 1074 -pc_type fieldsplit -pc_fieldsplit_type schur -pc_fieldsplit_schur_factorization_type full \ 1075 -fieldsplit_velocity_pc_type mg \ 1076 -fieldsplit_pressure_ksp_rtol 1e-10 -fieldsplit_pressure_pc_type jacobi 1077 # Vanka solver 1078 test: 1079 
suffix: 2d_quad_q1_p0_vanka_add 1080 requires: double !complex 1081 filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=49/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL iterations 49/g" 1082 args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \ 1083 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1084 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \ 1085 -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \ 1086 -sub_ksp_type preonly -sub_pc_type lu 1087 # Vanka solver, forming dense inverses on patches 1088 test: 1089 suffix: 2d_quad_q1_p0_vanka_add_dense_inverse 1090 requires: double !complex 1091 filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=49/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL iterations 49/g" 1092 args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \ 1093 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1094 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \ 1095 -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \ 1096 -pc_patch_dense_inverse -pc_patch_sub_mat_type seqdense 1097 test: 1098 suffix: 2d_quad_q1_p0_vanka_add_unity 1099 requires: double !complex 1100 filter: sed -e "s/linear solver iterations=[0-9][0-9]*""/linear solver iterations=45/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations [0-9][0-9]*""/Linear solve converged due to CONVERGED_RTOL 
iterations 45/g" 1101 args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 1 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \ 1102 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1103 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_converged_reason \ 1104 -pc_type patch -pc_patch_partition_of_unity 1 -pc_patch_construct_codim 0 -pc_patch_construct_type vanka \ 1105 -sub_ksp_type preonly -sub_pc_type lu 1106 test: 1107 suffix: 2d_quad_q2_q1_vanka_add 1108 requires: double !complex 1109 filter: sed -e "s/linear solver iterations=[0-9][0-9][0-9]*""/linear solver iterations=489/g" 1110 args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 0 -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \ 1111 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1112 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged \ 1113 -pc_type patch -pc_patch_partition_of_unity 0 -pc_patch_construct_dim 0 -pc_patch_construct_type vanka \ 1114 -sub_ksp_type preonly -sub_pc_type lu 1115 test: 1116 suffix: 2d_quad_q2_q1_vanka_add_unity 1117 requires: double !complex 1118 filter: sed -e "s/linear solver iterations=[0-9][0-9][0-9]*""/linear solver iterations=795/g" 1119 args: -run_type full -bc_type dirichlet -simplex 0 -dm_refine 0 -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -petscds_jac_pre 0 \ 1120 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1121 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged \ 1122 -pc_type patch -pc_patch_partition_of_unity 1 -pc_patch_construct_dim 0 -pc_patch_construct_type vanka \ 1123 -sub_ksp_type preonly -sub_pc_type lu 1124 # Vanka smoother 1125 test: 1126 suffix: 2d_quad_q1_p0_gmg_vanka_add 1127 requires: double !complex long_runtime 1128 args: -run_type full -bc_type 
dirichlet -simplex 0 -dm_refine_hierarchy 3 -interpolate 1 -vel_petscspace_degree 1 -pres_petscspace_degree 0 -petscds_jac_pre 0 \ 1129 -snes_rtol 1.0e-4 -snes_error_if_not_converged -snes_view -snes_monitor -snes_converged_reason \ 1130 -ksp_type gmres -ksp_rtol 1.0e-5 -ksp_error_if_not_converged -ksp_monitor_true_residual \ 1131 -pc_type mg -pc_mg_levels 3 \ 1132 -mg_levels_ksp_type gmres -mg_levels_ksp_max_it 30 -mg_levels_ksp_monitor_true_residual_no \ 1133 -mg_levels_pc_type patch -mg_levels_pc_patch_partition_of_unity 0 -mg_levels_pc_patch_construct_codim 0 -mg_levels_pc_patch_construct_type vanka \ 1134 -mg_levels_sub_ksp_type preonly -mg_levels_sub_pc_type lu \ 1135 -mg_coarse_pc_type svd 1136 1137 test: 1138 requires: !single 1139 suffix: bddc_quad 1140 nsize: 2 1141 args: -run_type full -dm_refine 1 -bc_type dirichlet -interpolate 1 -vel_petscspace_degree 2 -pres_petscspace_degree 1 -snes_view -snes_error_if_not_converged -dm_mat_type is -ksp_type gmres -ksp_rtol 1.e-8 -pc_type bddc -pc_bddc_corner_selection -pc_bddc_dirichlet_pc_type svd -pc_bddc_neumann_pc_type svd -pc_bddc_coarse_redundant_pc_type svd -simplex 0 -petscpartitioner_type simple -ksp_monitor_short -pc_bddc_symmetric 0 1142 1143 TEST*/ 1144