// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.

// libCEED + PETSc Example: CEED BPs
//
// This example demonstrates a simple usage of libCEED with PETSc to solve the
// CEED BP benchmark problems, see http://ceed.exascaleproject.org/bps,
// on a closed surface, such as the one of a discrete sphere.
//
// The code uses higher level communication protocols in DMPlex.
24 // 25 // Build with: 26 // 27 // make bpssphere [PETSC_DIR=</path/to/petsc>] [CEED_DIR=</path/to/libceed>] 28 // 29 // Sample runs: 30 // 31 // bpssphere -problem bp1 -degree 3 32 // bpssphere -problem bp2 -ceed /cpu/self -degree 3 33 // bpssphere -problem bp3 -ceed /cpu/self -degree 3 34 // bpssphere -problem bp4 -ceed /cpu/occa -degree 3 35 // bpssphere -problem bp5 -ceed /cpu/occa -degree 3 36 // bpssphere -problem bp6 -ceed /cpu/self -degree 3 37 // 38 //TESTARGS -ceed {ceed_resource} -test -problem bp3 -degree 3 39 40 /// @file 41 /// CEED BPs example using PETSc with DMPlex 42 /// See bps.c for a "raw" implementation using a structured grid. 43 /// and bpsdmplex.c for an implementation using an unstructured grid. 44 static const char help[] = "Solve CEED BPs on a sphere using DMPlex in PETSc\n"; 45 46 #include <stdbool.h> 47 #include <string.h> 48 #include <petscksp.h> 49 #include <petscdmplex.h> 50 #include <ceed.h> 51 #include "setupsphere.h" 52 53 int main(int argc, char **argv) { 54 PetscInt ierr; 55 MPI_Comm comm; 56 char ceedresource[PETSC_MAX_PATH_LEN] = "/cpu/self", 57 filename[PETSC_MAX_PATH_LEN]; 58 double my_rt_start, my_rt, rt_min, rt_max; 59 PetscInt degree = 3, qextra, lsize, gsize, topodim = 2, ncompx = 3, 60 ncompu = 1, xlsize; 61 PetscScalar *r; 62 PetscBool test_mode, benchmark_mode, read_mesh, write_solution, simplex; 63 PetscLogStage solvestage; 64 Vec X, Xloc, rhs, rhsloc; 65 Mat matO; 66 KSP ksp; 67 DM dm; 68 UserO userO; 69 Ceed ceed; 70 CeedData ceeddata; 71 CeedQFunction qf_error; 72 CeedOperator op_error; 73 CeedVector rhsceed, target; 74 bpType bpChoice; 75 76 ierr = PetscInitialize(&argc, &argv, NULL, help); 77 if (ierr) return ierr; 78 comm = PETSC_COMM_WORLD; 79 80 // Read command line options 81 ierr = PetscOptionsBegin(comm, NULL, "CEED BPs in PETSc", NULL); CHKERRQ(ierr); 82 bpChoice = CEED_BP1; 83 ierr = PetscOptionsEnum("-problem", 84 "CEED benchmark problem to solve", NULL, 85 bpTypes, (PetscEnum)bpChoice, (PetscEnum 
*)&bpChoice, 86 NULL); CHKERRQ(ierr); 87 ncompu = bpOptions[bpChoice].ncompu; 88 test_mode = PETSC_FALSE; 89 ierr = PetscOptionsBool("-test", 90 "Testing mode (do not print unless error is large)", 91 NULL, test_mode, &test_mode, NULL); CHKERRQ(ierr); 92 benchmark_mode = PETSC_FALSE; 93 ierr = PetscOptionsBool("-benchmark", 94 "Benchmarking mode (prints benchmark statistics)", 95 NULL, benchmark_mode, &benchmark_mode, NULL); 96 CHKERRQ(ierr); 97 write_solution = PETSC_FALSE; 98 ierr = PetscOptionsBool("-write_solution", 99 "Write solution for visualization", 100 NULL, write_solution, &write_solution, NULL); 101 CHKERRQ(ierr); 102 degree = test_mode ? 3 : 2; 103 ierr = PetscOptionsInt("-degree", "Polynomial degree of tensor product basis", 104 NULL, degree, °ree, NULL); CHKERRQ(ierr); 105 qextra = bpOptions[bpChoice].qextra; 106 ierr = PetscOptionsInt("-qextra", "Number of extra quadrature points", 107 NULL, qextra, &qextra, NULL); CHKERRQ(ierr); 108 ierr = PetscOptionsString("-ceed", "CEED resource specifier", 109 NULL, ceedresource, ceedresource, 110 sizeof(ceedresource), NULL); CHKERRQ(ierr); 111 read_mesh = PETSC_FALSE; 112 ierr = PetscOptionsString("-mesh", "Read mesh from file", NULL, 113 filename, filename, sizeof(filename), &read_mesh); 114 CHKERRQ(ierr); 115 simplex = PETSC_FALSE; 116 ierr = PetscOptionsBool("-simplex", "Use simplices, or tensor product cells", 117 NULL, simplex, &simplex, NULL); CHKERRQ(ierr); 118 ierr = PetscOptionsEnd(); CHKERRQ(ierr); 119 120 // Setup DM 121 if (read_mesh) { 122 ierr = DMPlexCreateFromFile(PETSC_COMM_WORLD, filename, PETSC_TRUE, &dm); 123 CHKERRQ(ierr); 124 } else { 125 // Create the mesh as a 0-refined sphere. This will create a cubic surface, not a box. 
126 PetscBool simplex = PETSC_FALSE; 127 ierr = DMPlexCreateSphereMesh(PETSC_COMM_WORLD, topodim, simplex, &dm); 128 CHKERRQ(ierr); 129 // Set the object name 130 ierr = PetscObjectSetName((PetscObject)dm, "Sphere"); CHKERRQ(ierr); 131 // Distribute mesh over processes 132 { 133 DM dmDist = NULL; 134 PetscPartitioner part; 135 136 ierr = DMPlexGetPartitioner(dm, &part); CHKERRQ(ierr); 137 ierr = PetscPartitionerSetFromOptions(part); CHKERRQ(ierr); 138 ierr = DMPlexDistribute(dm, 0, NULL, &dmDist); CHKERRQ(ierr); 139 if (dmDist) { 140 ierr = DMDestroy(&dm); CHKERRQ(ierr); 141 dm = dmDist; 142 } 143 } 144 // Refine DMPlex with uniform refinement using runtime option -dm_refine 145 ierr = DMPlexSetRefinementUniform(dm, PETSC_TRUE); CHKERRQ(ierr); 146 ierr = DMSetFromOptions(dm); CHKERRQ(ierr); 147 ierr = ProjectToUnitSphere(dm); CHKERRQ(ierr); 148 // View DMPlex via runtime option 149 ierr = DMViewFromOptions(dm, NULL, "-dm_view"); CHKERRQ(ierr); 150 } 151 152 // Create DM 153 ierr = SetupDMByDegree(dm, degree, ncompu, topodim); 154 CHKERRQ(ierr); 155 156 // Create vectors 157 ierr = DMCreateGlobalVector(dm, &X); CHKERRQ(ierr); 158 ierr = VecGetLocalSize(X, &lsize); CHKERRQ(ierr); 159 ierr = VecGetSize(X, &gsize); CHKERRQ(ierr); 160 ierr = DMCreateLocalVector(dm, &Xloc); CHKERRQ(ierr); 161 ierr = VecGetSize(Xloc, &xlsize); CHKERRQ(ierr); 162 ierr = VecDuplicate(X, &rhs); CHKERRQ(ierr); 163 164 // Operator 165 ierr = PetscMalloc1(1, &userO); CHKERRQ(ierr); 166 ierr = MatCreateShell(comm, lsize, lsize, gsize, gsize, 167 userO, &matO); CHKERRQ(ierr); 168 ierr = MatShellSetOperation(matO, MATOP_MULT, 169 (void(*)(void))MatMult_Ceed); 170 CHKERRQ(ierr); 171 172 // Set up libCEED 173 CeedInit(ceedresource, &ceed); 174 175 // Print summary 176 if (!test_mode) { 177 PetscInt P = degree + 1, Q = P + qextra; 178 const char *usedresource; 179 CeedGetResource(ceed, &usedresource); 180 ierr = PetscPrintf(comm, 181 "\n-- CEED Benchmark Problem %d on the Sphere -- libCEED + PETSc 
--\n" 182 " libCEED:\n" 183 " libCEED Backend : %s\n" 184 " Mesh:\n" 185 " Number of 1D Basis Nodes (p) : %d\n" 186 " Number of 1D Quadrature Points (q) : %d\n" 187 " Global nodes : %D\n", 188 bpChoice+1, ceedresource, P, Q, gsize/ncompu); 189 CHKERRQ(ierr); 190 } 191 192 // Create RHS vector 193 ierr = VecDuplicate(Xloc, &rhsloc); CHKERRQ(ierr); 194 ierr = VecZeroEntries(rhsloc); CHKERRQ(ierr); 195 ierr = VecGetArray(rhsloc, &r); CHKERRQ(ierr); 196 CeedVectorCreate(ceed, xlsize, &rhsceed); 197 CeedVectorSetArray(rhsceed, CEED_MEM_HOST, CEED_USE_POINTER, r); 198 199 // Setup libCEED's objects 200 ierr = PetscMalloc1(1, &ceeddata); CHKERRQ(ierr); 201 ierr = SetupLibceedByDegree(dm, ceed, degree, topodim, qextra, 202 ncompx, ncompu, gsize, xlsize, bpChoice, 203 ceeddata, true, rhsceed, &target); CHKERRQ(ierr); 204 205 // Gather RHS 206 ierr = VecRestoreArray(rhsloc, &r); CHKERRQ(ierr); 207 ierr = VecZeroEntries(rhs); CHKERRQ(ierr); 208 ierr = DMLocalToGlobalBegin(dm, rhsloc, ADD_VALUES, rhs); CHKERRQ(ierr); 209 ierr = DMLocalToGlobalEnd(dm, rhsloc, ADD_VALUES, rhs); CHKERRQ(ierr); 210 CeedVectorDestroy(&rhsceed); 211 212 // Create the error Q-function 213 CeedQFunctionCreateInterior(ceed, 1, bpOptions[bpChoice].error, 214 bpOptions[bpChoice].errorfname, &qf_error); 215 CeedQFunctionAddInput(qf_error, "u", ncompu, CEED_EVAL_INTERP); 216 CeedQFunctionAddInput(qf_error, "true_soln", ncompu, CEED_EVAL_NONE); 217 CeedQFunctionAddOutput(qf_error, "error", ncompu, CEED_EVAL_NONE); 218 219 // Create the error operator 220 CeedOperatorCreate(ceed, qf_error, NULL, NULL, &op_error); 221 CeedOperatorSetField(op_error, "u", ceeddata->Erestrictu, 222 ceeddata->basisu, CEED_VECTOR_ACTIVE); 223 CeedOperatorSetField(op_error, "true_soln", ceeddata->Erestrictui, 224 CEED_BASIS_COLLOCATED, target); 225 CeedOperatorSetField(op_error, "error", ceeddata->Erestrictui, 226 CEED_BASIS_COLLOCATED, CEED_VECTOR_ACTIVE); 227 228 // Set up Mat 229 userO->comm = comm; 230 userO->dm = dm; 231 
userO->Xloc = Xloc; 232 ierr = VecDuplicate(Xloc, &userO->Yloc); CHKERRQ(ierr); 233 userO->xceed = ceeddata->xceed; 234 userO->yceed = ceeddata->yceed; 235 userO->op = ceeddata->op_apply; 236 userO->ceed = ceed; 237 userO->topodim = topodim; 238 userO->simplex = simplex; 239 240 // Setup solver 241 ierr = KSPCreate(comm, &ksp); CHKERRQ(ierr); 242 { 243 PC pc; 244 ierr = KSPGetPC(ksp, &pc); CHKERRQ(ierr); 245 if (bpChoice == CEED_BP1 || bpChoice == CEED_BP2) { 246 ierr = PCSetType(pc, PCJACOBI); CHKERRQ(ierr); 247 ierr = PCJacobiSetType(pc, PC_JACOBI_ROWSUM); CHKERRQ(ierr); 248 } else { 249 ierr = PCSetType(pc, PCNONE); CHKERRQ(ierr); 250 MatNullSpace nullspace; 251 252 ierr = MatNullSpaceCreate(PETSC_COMM_WORLD, PETSC_TRUE, 0, 0, &nullspace); 253 CHKERRQ(ierr); 254 ierr = MatSetNullSpace(matO, nullspace); CHKERRQ(ierr); 255 ierr = MatNullSpaceDestroy(&nullspace); CHKERRQ(ierr); 256 } 257 ierr = KSPSetType(ksp, KSPCG); CHKERRQ(ierr); 258 ierr = KSPSetNormType(ksp, KSP_NORM_NATURAL); CHKERRQ(ierr); 259 ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 260 PETSC_DEFAULT); CHKERRQ(ierr); 261 } 262 ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr); 263 ierr = KSPSetOperators(ksp, matO, matO); CHKERRQ(ierr); 264 265 // First run, if benchmarking 266 if (benchmark_mode) { 267 ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 1); 268 CHKERRQ(ierr); 269 my_rt_start = MPI_Wtime(); 270 ierr = KSPSolve(ksp, rhs, X); CHKERRQ(ierr); 271 my_rt = MPI_Wtime() - my_rt_start; 272 ierr = MPI_Allreduce(MPI_IN_PLACE, &my_rt, 1, MPI_DOUBLE, MPI_MIN, comm); 273 CHKERRQ(ierr); 274 // Set maxits based on first iteration timing 275 if (my_rt > 0.02) { 276 ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 5); 277 CHKERRQ(ierr); 278 } else { 279 ierr = KSPSetTolerances(ksp, 1e-10, PETSC_DEFAULT, PETSC_DEFAULT, 20); 280 CHKERRQ(ierr); 281 } 282 } 283 284 // Timed solve 285 ierr = VecZeroEntries(X); CHKERRQ(ierr); 286 ierr = 
PetscBarrier((PetscObject)ksp); CHKERRQ(ierr); 287 288 // -- Performance logging 289 ierr = PetscLogStageRegister("Solve Stage", &solvestage); CHKERRQ(ierr); 290 ierr = PetscLogStagePush(solvestage); CHKERRQ(ierr); 291 292 // -- Solve 293 my_rt_start = MPI_Wtime(); 294 ierr = KSPSolve(ksp, rhs, X); CHKERRQ(ierr); 295 my_rt = MPI_Wtime() - my_rt_start; 296 297 // -- Performance logging 298 ierr = PetscLogStagePop(); 299 300 // Output results 301 { 302 KSPType ksptype; 303 KSPConvergedReason reason; 304 PetscReal rnorm; 305 PetscInt its; 306 ierr = KSPGetType(ksp, &ksptype); CHKERRQ(ierr); 307 ierr = KSPGetConvergedReason(ksp, &reason); CHKERRQ(ierr); 308 ierr = KSPGetIterationNumber(ksp, &its); CHKERRQ(ierr); 309 ierr = KSPGetResidualNorm(ksp, &rnorm); CHKERRQ(ierr); 310 if (!test_mode || reason < 0 || rnorm > 1e-8) { 311 ierr = PetscPrintf(comm, 312 " KSP:\n" 313 " KSP Type : %s\n" 314 " KSP Convergence : %s\n" 315 " Total KSP Iterations : %D\n" 316 " Final rnorm : %e\n", 317 ksptype, KSPConvergedReasons[reason], its, 318 (double)rnorm); CHKERRQ(ierr); 319 } 320 if (!test_mode) { 321 ierr = PetscPrintf(comm," Performance:\n"); CHKERRQ(ierr); 322 } 323 { 324 PetscReal maxerror; 325 ierr = ComputeErrorMax(userO, op_error, X, target, &maxerror); 326 CHKERRQ(ierr); 327 PetscReal tol = 6e-2; 328 if (!test_mode || maxerror > tol) { 329 ierr = MPI_Allreduce(&my_rt, &rt_min, 1, MPI_DOUBLE, MPI_MIN, comm); 330 CHKERRQ(ierr); 331 ierr = MPI_Allreduce(&my_rt, &rt_max, 1, MPI_DOUBLE, MPI_MAX, comm); 332 CHKERRQ(ierr); 333 ierr = PetscPrintf(comm, 334 " Pointwise Error (max) : %e\n" 335 " CG Solve Time : %g (%g) sec\n", 336 (double)maxerror, rt_max, rt_min); CHKERRQ(ierr); 337 } 338 } 339 if (benchmark_mode && (!test_mode)) { 340 ierr = PetscPrintf(comm, 341 " DoFs/Sec in CG : %g (%g) million\n", 342 1e-6*gsize*its/rt_max, 343 1e-6*gsize*its/rt_min); CHKERRQ(ierr); 344 } 345 } 346 347 // Output solution 348 if (write_solution) { 349 PetscViewer vtkviewersoln; 350 351 ierr = 
PetscViewerCreate(comm, &vtkviewersoln); CHKERRQ(ierr); 352 ierr = PetscViewerSetType(vtkviewersoln, PETSCVIEWERVTK); CHKERRQ(ierr); 353 ierr = PetscViewerFileSetName(vtkviewersoln, "solution.vtu"); CHKERRQ(ierr); 354 ierr = VecView(X, vtkviewersoln); CHKERRQ(ierr); 355 ierr = PetscViewerDestroy(&vtkviewersoln); CHKERRQ(ierr); 356 } 357 358 // Cleanup 359 ierr = VecDestroy(&X); CHKERRQ(ierr); 360 ierr = VecDestroy(&Xloc); CHKERRQ(ierr); 361 ierr = VecDestroy(&userO->Yloc); CHKERRQ(ierr); 362 ierr = MatDestroy(&matO); CHKERRQ(ierr); 363 ierr = PetscFree(userO); CHKERRQ(ierr); 364 ierr = CeedDataDestroy(0, ceeddata); CHKERRQ(ierr); 365 ierr = DMDestroy(&dm); CHKERRQ(ierr); 366 367 ierr = VecDestroy(&rhs); CHKERRQ(ierr); 368 ierr = VecDestroy(&rhsloc); CHKERRQ(ierr); 369 ierr = KSPDestroy(&ksp); CHKERRQ(ierr); 370 CeedVectorDestroy(&target); 371 CeedQFunctionDestroy(&qf_error); 372 CeedOperatorDestroy(&op_error); 373 CeedDestroy(&ceed); 374 return PetscFinalize(); 375 } 376