static const char help[] = "-Laplacian u = b as a nonlinear problem.\n\n";

/*T
   Concepts: SNES^parallel Bratu example
   Concepts: DMDA^using distributed arrays;
   Concepts: IS coloring types;
   Processors: n
T*/

/*
   The linear and nonlinear versions of these should give almost identical results on this problem

   Richardson
     Nonlinear:
       -snes_rtol 1.e-12 -snes_monitor -snes_type nrichardson -snes_linesearch_monitor

     Linear:
       -snes_rtol 1.e-12 -snes_monitor -ksp_rtol 1.e-12 -ksp_monitor -ksp_type richardson -pc_type none -ksp_richardson_self_scale -info

   GMRES
     Nonlinear:
       -snes_rtol 1.e-12 -snes_monitor -snes_type ngmres

     Linear:
       -snes_rtol 1.e-12 -snes_monitor -ksp_type gmres -ksp_monitor -ksp_rtol 1.e-12 -pc_type none

   CG
     Nonlinear:
       -snes_rtol 1.e-12 -snes_monitor -snes_type ncg -snes_linesearch_monitor

     Linear:
       -snes_rtol 1.e-12 -snes_monitor -ksp_type cg -ksp_monitor -ksp_rtol 1.e-12 -pc_type none

   Multigrid
     Linear:
       1 level:
         -snes_rtol 1.e-12 -snes_monitor -pc_type mg -mg_levels_ksp_type richardson -mg_levels_pc_type none -mg_levels_ksp_monitor
         -mg_levels_ksp_richardson_self_scale -ksp_type richardson -ksp_monitor -ksp_rtol 1.e-12 -ksp_monitor_true_residual

       n levels:
         -da_refine n

     Nonlinear:
       1 level:
         -snes_rtol 1.e-12 -snes_monitor -snes_type fas -fas_levels_snes_monitor

       n levels:
         -da_refine n -fas_coarse_snes_type newtonls -fas_coarse_pc_type lu -fas_coarse_ksp_type preonly
*/

/*
   Include "petscdmda.h" so that we can use distributed arrays (DMDAs).
   Include "petscsnes.h" so that we can use SNES solvers.
Note that this */ #include #include #include /* User-defined routines */ extern PetscErrorCode FormMatrix(DM,Mat); extern PetscErrorCode MyComputeFunction(SNES,Vec,Vec,void*); extern PetscErrorCode MyComputeJacobian(SNES,Vec,Mat,Mat,void*); extern PetscErrorCode NonlinearGS(SNES,Vec); int main(int argc,char **argv) { SNES snes; /* nonlinear solver */ SNES psnes; /* nonlinear Gauss-Seidel approximate solver */ Vec x,b; /* solution vector */ PetscInt its; /* iterations for convergence */ PetscErrorCode ierr; DM da; PetscBool use_ngs_as_npc = PETSC_FALSE; /* use the nonlinear Gauss-Seidel approximate solver */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Initialize program - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr; /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Create nonlinear solver context - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(SNESCreate(PETSC_COMM_WORLD,&snes)); CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-use_ngs_as_npc",&use_ngs_as_npc,0)); if (use_ngs_as_npc) { CHKERRQ(SNESGetNPC(snes,&psnes)); CHKERRQ(SNESSetType(psnes,SNESSHELL)); CHKERRQ(SNESShellSetSolve(psnes,NonlinearGS)); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Create distributed array (DMDA) to manage parallel grid and vectors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(DMDACreate2d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,4,4,PETSC_DECIDE,PETSC_DECIDE,1,1,NULL,NULL,&da)); CHKERRQ(DMSetFromOptions(da)); CHKERRQ(DMSetUp(da)); CHKERRQ(DMDASetUniformCoordinates(da, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0)); CHKERRQ(SNESSetDM(snes,da)); if (use_ngs_as_npc) { CHKERRQ(SNESShellSetContext(psnes,da)); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Extract global vectors from DMDA; then duplicate for remaining 
vectors that are the same types - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(DMCreateGlobalVector(da,&x)); CHKERRQ(DMCreateGlobalVector(da,&b)); CHKERRQ(VecSet(b,1.0)); CHKERRQ(SNESSetFunction(snes,NULL,MyComputeFunction,NULL)); CHKERRQ(SNESSetJacobian(snes,NULL,NULL,MyComputeJacobian,NULL)); /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Customize nonlinear solver; set runtime options - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(SNESSetFromOptions(snes)); /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Solve nonlinear system - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(SNESSolve(snes,b,x)); CHKERRQ(SNESGetIterationNumber(snes,&its)); /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Free work space. All PETSc objects should be destroyed when they are no longer needed. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ CHKERRQ(VecDestroy(&x)); CHKERRQ(VecDestroy(&b)); CHKERRQ(SNESDestroy(&snes)); CHKERRQ(DMDestroy(&da)); ierr = PetscFinalize(); return ierr; } /* ------------------------------------------------------------------- */ PetscErrorCode MyComputeFunction(SNES snes,Vec x,Vec F,void *ctx) { Mat J; DM dm; PetscFunctionBeginUser; CHKERRQ(SNESGetDM(snes,&dm)); CHKERRQ(DMGetApplicationContext(dm,&J)); if (!J) { CHKERRQ(DMSetMatType(dm,MATAIJ)); CHKERRQ(DMCreateMatrix(dm,&J)); CHKERRQ(MatSetDM(J, NULL)); CHKERRQ(FormMatrix(dm,J)); CHKERRQ(DMSetApplicationContext(dm,J)); CHKERRQ(DMSetApplicationContextDestroy(dm,(PetscErrorCode (*)(void**))MatDestroy)); } CHKERRQ(MatMult(J,x,F)); PetscFunctionReturn(0); } PetscErrorCode MyComputeJacobian(SNES snes,Vec x,Mat J,Mat Jp,void *ctx) { DM dm; PetscFunctionBeginUser; CHKERRQ(SNESGetDM(snes,&dm)); CHKERRQ(FormMatrix(dm,Jp)); PetscFunctionReturn(0); } PetscErrorCode FormMatrix(DM da,Mat jac) { PetscInt i,j,nrows = 0; MatStencil col[5],row,*rows; PetscScalar v[5],hx,hy,hxdhy,hydhx; DMDALocalInfo info; PetscFunctionBeginUser; CHKERRQ(DMDAGetLocalInfo(da,&info)); hx = 1.0/(PetscReal)(info.mx-1); hy = 1.0/(PetscReal)(info.my-1); hxdhy = hx/hy; hydhx = hy/hx; CHKERRQ(PetscMalloc1(info.ym*info.xm,&rows)); /* Compute entries for the locally owned part of the Jacobian. - Currently, all PETSc parallel matrix formats are partitioned by contiguous chunks of rows across the processors. - Each processor needs to insert only elements that it owns locally (but any non-local elements will be sent to the appropriate processor during matrix assembly). - Here, we set all entries for a particular row at once. - We can set matrix entries either using either MatSetValuesLocal() or MatSetValues(), as discussed above. */ for (j=info.ys; j