static char help[] = "Tests PetscObjectSetOptions() for TS object\n\n";

/*
   Concepts: TS^time-dependent nonlinear problems
   Processors: n
*/

/* ------------------------------------------------------------------------

   This program solves the PDE

                 u * u_xx
           u_t = ---------
                 2*(t+1)^2

   on the domain 0 <= x <= 1, with boundary conditions
         u(t,0) = t + 1,  u(t,1) = 2*t + 2,
   and initial condition
         u(0,x) = 1 + x*x.

   The exact solution is:
         u(t,x) = (1 + x*x) * (1 + t)

   Note that since the solution is linear in time and quadratic in x,
   the finite difference scheme actually computes the "exact" solution:
   the centered second difference (u(x+h) + u(x-h) - 2*u(x))/h^2 is exact
   for quadratics, so no spatial discretization error is introduced.

   By default we use the backward Euler method.

  ------------------------------------------------------------------------- */

/*
   Include "petscts.h" to use the PETSc timestepping routines. Note that
   this file automatically includes "petscsys.h" and other lower-level
   PETSc include files.

   Include "petscdmda.h" so that we can use the distributed array data
   structures to manage the parallel grid.
*/
#include <petscts.h>
#include <petscdm.h>
#include <petscdmda.h>
#include <petscdraw.h>

/*
   User-defined application context - contains data needed by the
   application-provided callback routines.
*/
typedef struct {
  MPI_Comm  comm;      /* communicator */
  DM        da;        /* distributed array data structure */
  Vec       localwork; /* local ghosted work vector */
  Vec       u_local;   /* local ghosted approximate solution vector */
  Vec       solution;  /* global exact solution vector */
  PetscInt  m;         /* total number of grid points */
  PetscReal h;         /* mesh width: h = 1/(m-1) */
} AppCtx;

/*
   User-defined routines, provided below.
*/
extern PetscErrorCode InitialConditions(Vec,AppCtx*);
extern PetscErrorCode RHSFunction(TS,PetscReal,Vec,Vec,void*);
extern PetscErrorCode RHSJacobian(TS,PetscReal,Vec,Mat,Mat,void*);
extern PetscErrorCode ExactSolution(PetscReal,Vec,AppCtx*);

int main(int argc,char **argv)
{
  AppCtx         appctx;                 /* user-defined application context */
  TS             ts;                     /* timestepping context */
  Mat            A;                      /* Jacobian matrix data structure */
  Vec            u;                      /* approximate solution vector */
  PetscInt       time_steps_max = 100;   /* default max timesteps */
  PetscErrorCode ierr;
  PetscReal      dt;
  PetscReal      time_total_max = 100.0; /* default max total time */
  PetscOptions   options,optionscopy;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;

  ierr = PetscOptionsCreate(&options);CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(options,"-ts_monitor","ascii");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(options,"-snes_monitor","ascii");CHKERRQ(ierr);
  ierr = PetscOptionsSetValue(options,"-ksp_monitor","ascii");CHKERRQ(ierr);
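  /*
     These monitor options live in a private database created with
     PetscOptionsCreate(), not in the global database populated from the
     command line; only objects explicitly attached to it with
     PetscObjectSetOptions() - as done for the DM, TS, and Mat below -
     consult it. Setting a value there is the per-object analogue of a
     command-line flag such as -ts_monitor, but scoped to that one object.
  */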
  appctx.comm = PETSC_COMM_WORLD;
  appctx.m    = 60;

  ierr = PetscOptionsGetInt(options,NULL,"-M",&appctx.m,NULL);CHKERRQ(ierr);

  appctx.h = 1.0/(appctx.m-1.0);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. There are M
     total grid values spread equally among all the processors.
  */
  ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,appctx.m,1,1,NULL,&appctx.da);CHKERRQ(ierr);
  ierr = PetscObjectSetOptions((PetscObject)appctx.da,options);CHKERRQ(ierr);
  ierr = DMSetFromOptions(appctx.da);CHKERRQ(ierr);
  ierr = DMSetUp(appctx.da);CHKERRQ(ierr);

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution. Then duplicate these for remaining vectors that
     have the same types.
  */
  ierr = DMCreateGlobalVector(appctx.da,&u);CHKERRQ(ierr);
  ierr = DMCreateLocalVector(appctx.da,&appctx.u_local);CHKERRQ(ierr);

  /*
     Create local work vector for use in evaluating the right-hand-side
     function; create global work vector for storing the exact solution.
  */
  ierr = VecDuplicate(appctx.u_local,&appctx.localwork);CHKERRQ(ierr);
  ierr = VecDuplicate(u,&appctx.solution);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context; set callback routine for
     right-hand-side function evaluation.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr);
  ierr = PetscObjectSetOptions((PetscObject)ts,options);CHKERRQ(ierr);
  ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr);
  ierr = TSSetRHSFunction(ts,NULL,RHSFunction,&appctx);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     For nonlinear problems, the user can provide a Jacobian evaluation
     routine (or use a finite differencing approximation).

     Create matrix data structure; set Jacobian evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = PetscObjectSetOptions((PetscObject)A,options);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,appctx.m,appctx.m);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);
  ierr = TSSetRHSJacobian(ts,A,A,RHSJacobian,&appctx);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  dt   = appctx.h/2.0;
  ierr = TSSetTimeStep(ts,dt);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set the solution method to be the backward Euler method.
       - Set timestepping duration info.
     Then set runtime options, which can override these defaults.
     For example,
         -ts_max_steps <maxsteps> -ts_max_time <maxtime>
     overrides the defaults set by TSSetMaxSteps()/TSSetMaxTime().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
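  /*
     Note: because the TS was attached to the private options database with
     PetscObjectSetOptions() above, TSSetFromOptions() below reads from that
     database rather than from the command line. So a runtime override such
     as -ts_max_steps would have to be inserted into that database first,
     e.g. (illustrative):

        ierr = PetscOptionsSetValue(options,"-ts_max_steps","10");CHKERRQ(ierr);
  */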
  ierr = TSSetType(ts,TSBEULER);CHKERRQ(ierr);
  ierr = TSSetMaxSteps(ts,time_steps_max);CHKERRQ(ierr);
  ierr = TSSetMaxTime(ts,time_total_max);CHKERRQ(ierr);
  ierr = TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER);CHKERRQ(ierr);
  ierr = TSSetFromOptions(ts);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Evaluate initial conditions
  */
  ierr = InitialConditions(u,&appctx);CHKERRQ(ierr);

  /*
     Run the timestepping solver
  */
  ierr = TSSolve(ts,u);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = PetscObjectGetOptions((PetscObject)ts,&optionscopy);CHKERRQ(ierr);
  if (options != optionscopy) SETERRQ(PETSC_COMM_WORLD,PETSC_ERR_PLIB,"PetscObjectGetOptions() failed");

  ierr = TSDestroy(&ts);CHKERRQ(ierr);
  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = DMDestroy(&appctx.da);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.localwork);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.solution);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.u_local);CHKERRQ(ierr);
  ierr = PetscOptionsDestroy(&options);CHKERRQ(ierr);

  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI,
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u,AppCtx *appctx)
{
  PetscScalar    *u_localptr,h = appctx->h,x;
  PetscInt       i,mybase,myend;
  PetscErrorCode ierr;

  /*
     Determine the starting point of each processor's range of
     grid values.
  */
  ierr = VecGetOwnershipRange(u,&mybase,&myend);CHKERRQ(ierr);

  /*
     Get a pointer to vector data.
       - For default PETSc vectors, VecGetArray() returns a pointer to
         the data array. Otherwise, the routine is implementation dependent.
       - You MUST call VecRestoreArray() when you no longer need access to
         the array.
       - Note that the Fortran interface to VecGetArray() differs from the
         C version. See the users manual for details.
  */
  ierr = VecGetArray(u,&u_localptr);CHKERRQ(ierr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations. Alternatively, we could use
     VecSetValues() or VecSetValuesLocal(); see the sketch below.
  */
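  /*
     A minimal sketch of the VecSetValues() alternative (global indices,
     followed by a separate assembly step; illustrative only, not executed):

        PetscScalar val = 1.0 + x*x;
        ierr = VecSetValues(u,1,&i,&val,INSERT_VALUES);CHKERRQ(ierr);
        ...
        ierr = VecAssemblyBegin(u);CHKERRQ(ierr);
        ierr = VecAssemblyEnd(u);CHKERRQ(ierr);
  */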
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i; /* current location in global grid */
    u_localptr[i-mybase] = 1.0 + x*x;
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArray(u,&u_localptr);CHKERRQ(ierr);

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t - current time
   solution - vector in which exact solution will be computed
   appctx - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
PetscErrorCode ExactSolution(PetscReal t,Vec solution,AppCtx *appctx)
{
  PetscScalar    *s_localptr,h = appctx->h,x;
  PetscInt       i,mybase,myend;
  PetscErrorCode ierr;

  /*
     Determine the starting and ending points of each processor's
     range of grid values.
  */
  ierr = VecGetOwnershipRange(solution,&mybase,&myend);CHKERRQ(ierr);

  /*
     Get a pointer to vector data.
  */
  ierr = VecGetArray(solution,&s_localptr);CHKERRQ(ierr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i;
    s_localptr[i-mybase] = (t + 1.0)*(1.0 + x*x);
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArray(solution,&s_localptr);CHKERRQ(ierr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSFunction - User-provided routine that evaluates the right-hand-side
   function of the ODE. This routine is set in the main program by
   calling TSSetRHSFunction(). We compute:
         global_out = F(global_in)

   Input Parameters:
   ts - timestepping context
   t - current time
   global_in - vector containing the current iterate
   ctx - (optional) user-provided context for function evaluation.
         In this case we use the appctx defined above.

   Output Parameter:
   global_out - vector containing the newly evaluated function
*/
PetscErrorCode RHSFunction(TS ts,PetscReal t,Vec global_in,Vec global_out,void *ctx)
{
  AppCtx            *appctx   = (AppCtx*) ctx;     /* user-defined application context */
  DM                da        = appctx->da;        /* distributed array */
  Vec               local_in  = appctx->u_local;   /* local ghosted input vector */
  Vec               localwork = appctx->localwork; /* local ghosted work vector */
  PetscErrorCode    ierr;
  PetscInt          i,localsize;
  PetscMPIInt       rank,size;
  PetscScalar       *copyptr,sc;
  const PetscScalar *localptr;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local function computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to the local vector, using the 2-step process
     DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transit.
  */
  ierr = DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);
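  /*
     For reference, the discretized right-hand side evaluated below is the
     centered-difference approximation of u*u_xx/(2*(t+1)^2) at each
     interior node i:

           F_i = u_i * (u_{i+1} + u_{i-1} - 2*u_i) / (2*h^2*(1+t)^2)

     which is why sc is set to 1/(2*h^2*(1+t)^2) below.
  */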
  /*
     Access directly the values in our local INPUT work array
  */
  ierr = VecGetArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /*
     Access directly the values in our local OUTPUT work array
  */
  ierr = VecGetArray(localwork,&copyptr);CHKERRQ(ierr);

  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));

  /*
     Evaluate our function on the nodes owned by this processor
  */
  ierr = VecGetLocalSize(local_in,&localsize);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Handle boundary conditions: this is done by using the boundary condition
           u(t,boundary) = g(t,boundary)
     for some function g. Now take the derivative with respect to t to obtain
           u_{t}(t,boundary) = g_{t}(t,boundary).

     In our case, u(t,0) = t + 1, so that u_{t}(t,0) = 1,
     and u(t,1) = 2*t + 2, so that u_{t}(t,1) = 2.
  */
  ierr = MPI_Comm_rank(appctx->comm,&rank);CHKERRMPI(ierr);
  ierr = MPI_Comm_size(appctx->comm,&size);CHKERRMPI(ierr);
  if (!rank)          copyptr[0] = 1.0;
  if (rank == size-1) copyptr[localsize-1] = 2.0;

  /*
     Handle the interior nodes where the PDE is replaced by the finite
     difference operators.
  */
  for (i=1; i<localsize-1; i++) copyptr[i] = localptr[i] * sc * (localptr[i+1] + localptr[i-1] - 2.0*localptr[i]);

  /*
     Restore vectors
  */
  ierr = VecRestoreArrayRead(local_in,&localptr);CHKERRQ(ierr);
  ierr = VecRestoreArray(localwork,&copyptr);CHKERRQ(ierr);

  /*
     Insert values from the local OUTPUT vector into the global
     output vector
  */
  ierr = DMLocalToGlobalBegin(da,localwork,INSERT_VALUES,global_out);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,localwork,INSERT_VALUES,global_out);CHKERRQ(ierr);

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSJacobian - User-provided routine to compute the Jacobian of
   the nonlinear right-hand-side function of the ODE.

   Input Parameters:
   ts - the TS context
   t - current time
   global_in - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   BB - optionally different matrix from which the preconditioner is built

   Notes:
   RHSJacobian computes entries for the locally owned part of the Jacobian.
    - Currently, all PETSc parallel matrix formats are partitioned by
      contiguous chunks of rows across the processors.
    - Each processor needs to insert only elements that it owns
      locally (but any non-local elements will be sent to the
      appropriate processor during matrix assembly).
    - Always specify global row and column indices of matrix entries when
      using MatSetValues().
    - Here, we set all entries for a particular row at once.
    - Note that MatSetValues() uses 0-based row and column numbers
      in Fortran as well as in C.
*/
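/*
   For reference, with F_i = sc * u_i * (u_{i+1} + u_{i-1} - 2*u_i) and
   sc = 1/(2*h^2*(1+t)^2), the product rule gives the nonzero Jacobian
   entries set below for each interior row i:

         dF_i/du_{i-1} = sc * u_i
         dF_i/du_i     = sc * (u_{i+1} + u_{i-1} - 4*u_i)
         dF_i/du_{i+1} = sc * u_i
*/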
PetscErrorCode RHSJacobian(TS ts,PetscReal t,Vec global_in,Mat AA,Mat BB,void *ctx)
{
  AppCtx            *appctx  = (AppCtx*)ctx;    /* user-defined application context */
  Vec               local_in = appctx->u_local; /* local ghosted input vector */
  DM                da       = appctx->da;      /* distributed array */
  PetscScalar       v[3],sc;
  const PetscScalar *localptr;
  PetscErrorCode    ierr;
  PetscInt          i,mstart,mend,mstarts,mends,idx[3],is;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local Jacobian computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to the local vector, using the 2-step process
     DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transit.
  */
  ierr = DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);

  /*
     Get a pointer to vector data
  */
  ierr = VecGetArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /*
     Get the starting and ending locally owned rows of the matrix
  */
  ierr = MatGetOwnershipRange(BB,&mstarts,&mends);CHKERRQ(ierr);
  mstart = mstarts; mend = mends;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Here, we set all entries for a particular row at once.
      - We can set matrix entries using either MatSetValuesLocal()
        or MatSetValues().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set matrix rows corresponding to boundary data
  */
  if (mstart == 0) {
    v[0] = 0.0;
    ierr = MatSetValues(BB,1,&mstart,1,&mstart,v,INSERT_VALUES);CHKERRQ(ierr);
    mstart++;
  }
  if (mend == appctx->m) {
    mend--;
    v[0] = 0.0;
    ierr = MatSetValues(BB,1,&mend,1,&mend,v,INSERT_VALUES);CHKERRQ(ierr);
  }

  /*
     Set matrix rows corresponding to interior data. We construct the
     matrix one row at a time.
  */
  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
  for (i=mstart; i<mend; i++) {
    idx[0] = i-1; idx[1] = i; idx[2] = i+1;
    is     = i - mstart + 1;
    v[0]   = sc*localptr[is];
    v[1]   = sc*(localptr[is+1] + localptr[is-1] - 4.0*localptr[is]);
    v[2]   = sc*localptr[is];
    ierr   = MatSetValues(BB,1,&i,3,idx,v,INSERT_VALUES);CHKERRQ(ierr);
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
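  /*
     Note: in this program main() passes the same matrix for AA and BB
     (TSSetRHSJacobian(ts,A,A,...)), so only the BB assembly below actually
     runs; the BB != AA branch covers the case where a separate matrix is
     used to build the preconditioner.
  */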
  ierr = MatAssemblyBegin(BB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(BB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (BB != AA) {
    ierr = MatAssemblyBegin(AA,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(AA,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix. If we do, it will generate an error.
  */
  ierr = MatSetOption(BB,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);

  return 0;
}

/*TEST

    test:
      requires: !single

TEST*/
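/*
   Example run (the binary name here is illustrative):

       mpiexec -n 2 ./ex_setoptions -M 30

   The TS/SNES/KSP monitor output appears even though no monitor options are
   given on the command line, because those options were placed in the
   private database attached to the objects via PetscObjectSetOptions().
*/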