static char help[] = "Solves a time-dependent nonlinear PDE with lower and upper bounds on the interior grid points. Uses implicit\n\
timestepping. Runtime options include:\n\
-M <xg>, where <xg> = number of grid points\n\
-debug : Activate debugging printouts\n\
-nox : Deactivate x-window graphics\n\
-ul : lower bound\n\
-uh : upper bound\n\
-mymonitor : Activate the user-defined monitoring routine\n\n";

/*
   Concepts: TS^time-dependent nonlinear problems
   Concepts: TS^Variational inequality nonlinear solver
   Processors: n
*/

/* ------------------------------------------------------------------------

   This is a variation of ex2.c to solve the PDE

               u * u_xx
         u_t = ---------
               2*(t+1)^2

   with box constraints on the interior grid points
         ul <= u(t,x) <= uh,  x != 0,1,
   on the domain 0 <= x <= 1, with boundary conditions
         u(t,0) = t + 1,  u(t,1) = 2*t + 2,
   and initial condition
         u(0,x) = 1 + x*x.

   The exact solution is
         u(t,x) = (1 + x*x) * (1 + t).

   By default we use the backward Euler method.

  ------------------------------------------------------------------------- */
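
/*
   A quick check that the stated exact solution satisfies the PDE: for
   u = (1 + x*x)*(1 + t) we have
         u_t  = 1 + x*x        and        u_xx = 2*(1 + t),
   so
         u * u_xx / (2*(t+1)^2) = (1 + x*x)*(1 + t) * 2*(1 + t) / (2*(t+1)^2)
                                = 1 + x*x = u_t.
   The boundary and initial conditions above hold as well:
   u(t,0) = 1 + t, u(t,1) = 2*(1 + t), u(0,x) = 1 + x*x.
*/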

/*
   Include "petscts.h" to use the PETSc timestepping routines. Note that
   this file automatically includes "petscsys.h" and other lower-level
   PETSc include files.

   Include the "petscdmda.h" to allow us to use the distributed array data
   structures to manage the parallel grid.
*/
#include <petscts.h>
#include <petscdm.h>
#include <petscdmda.h>
#include <petscdraw.h>

/*
   User-defined application context - contains data needed by the
   application-provided callback routines.
*/
typedef struct {
  MPI_Comm  comm;      /* communicator */
  DM        da;        /* distributed array data structure */
  Vec       localwork; /* local ghosted work vector */
  Vec       u_local;   /* local ghosted approximate solution vector */
  Vec       solution;  /* global exact solution vector */
  PetscInt  m;         /* total number of grid points */
  PetscReal h;         /* mesh width: h = 1/(m-1) */
  PetscBool debug;     /* flag (1 indicates activation of debugging printouts) */
} AppCtx;

/*
   User-defined routines, provided below.
*/
extern PetscErrorCode InitialConditions(Vec,AppCtx*);
extern PetscErrorCode RHSFunction(TS,PetscReal,Vec,Vec,void*);
extern PetscErrorCode RHSJacobian(TS,PetscReal,Vec,Mat,Mat,void*);
extern PetscErrorCode Monitor(TS,PetscInt,PetscReal,Vec,void*);
extern PetscErrorCode ExactSolution(PetscReal,Vec,AppCtx*);
extern PetscErrorCode SetBounds(Vec,Vec,PetscScalar,PetscScalar,AppCtx*);

int main(int argc,char **argv)
{
  AppCtx         appctx;                 /* user-defined application context */
  TS             ts;                     /* timestepping context */
  Mat            A;                      /* Jacobian matrix data structure */
  Vec            u;                      /* approximate solution vector */
  Vec            r;                      /* residual vector */
  PetscInt       time_steps_max = 1000;  /* default max timesteps */
  PetscErrorCode ierr;
  PetscReal      dt;
  PetscReal      time_total_max = 100.0; /* default max total time */
  Vec            xl,xu;                  /* lower and upper bounds on variables */
  PetscScalar    ul = 0.0,uh = 3.0;
  PetscBool      mymonitor;
  PetscReal      bounds[] = {1.0, 3.3};

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  ierr = PetscViewerDrawSetBounds(PETSC_VIEWER_DRAW_(PETSC_COMM_WORLD),1,bounds);CHKERRQ(ierr);

  appctx.comm = PETSC_COMM_WORLD;
  appctx.m    = 60;
  ierr        = PetscOptionsGetInt(NULL,NULL,"-M",&appctx.m,NULL);CHKERRQ(ierr);
  ierr        = PetscOptionsGetScalar(NULL,NULL,"-ul",&ul,NULL);CHKERRQ(ierr);
  ierr        = PetscOptionsGetScalar(NULL,NULL,"-uh",&uh,NULL);CHKERRQ(ierr);
  ierr        = PetscOptionsHasName(NULL,NULL,"-debug",&appctx.debug);CHKERRQ(ierr);
  ierr        = PetscOptionsHasName(NULL,NULL,"-mymonitor",&mymonitor);CHKERRQ(ierr);
  appctx.h    = 1.0/(appctx.m-1.0);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. There are M
     total grid values spread equally among all the processors.
  */
  ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,appctx.m,1,1,NULL,&appctx.da);CHKERRQ(ierr);
  ierr = DMSetFromOptions(appctx.da);CHKERRQ(ierr);
  ierr = DMSetUp(appctx.da);CHKERRQ(ierr);

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution. Then duplicate these for remaining vectors that
     have the same types.
  */
  ierr = DMCreateGlobalVector(appctx.da,&u);CHKERRQ(ierr);
  ierr = DMCreateLocalVector(appctx.da,&appctx.u_local);CHKERRQ(ierr);

  /*
     Create local work vector for use in evaluating right-hand-side function;
     create global work vector for storing exact solution.
  */
  ierr = VecDuplicate(appctx.u_local,&appctx.localwork);CHKERRQ(ierr);
  ierr = VecDuplicate(u,&appctx.solution);CHKERRQ(ierr);

  /* Create residual vector */
  ierr = VecDuplicate(u,&r);CHKERRQ(ierr);
  /* Create lower and upper bound vectors */
  ierr = VecDuplicate(u,&xl);CHKERRQ(ierr);
  ierr = VecDuplicate(u,&xu);CHKERRQ(ierr);
  ierr = SetBounds(xl,xu,ul,uh,&appctx);CHKERRQ(ierr);
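
  /*
     Note: SetBounds() (defined below) applies the box constraints
     ul <= u_i <= uh only to the interior grid points; the two boundary
     points get bounds of -/+ PETSC_INFINITY, since their values are
     prescribed by the boundary conditions rather than constrained.
  */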

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context; set callback routine for
     right-hand-side function evaluation.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr);
  ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr);
  ierr = TSSetRHSFunction(ts,r,RHSFunction,&appctx);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set optional user-defined monitoring routine
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  if (mymonitor) {
    ierr = TSMonitorSet(ts,Monitor,&appctx,NULL);CHKERRQ(ierr);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     For nonlinear problems, the user can provide a Jacobian evaluation
     routine (or use a finite differencing approximation).

     Create matrix data structure; set Jacobian evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,appctx.m,appctx.m);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSetUp(A);CHKERRQ(ierr);
  ierr = TSSetRHSJacobian(ts,A,A,RHSJacobian,&appctx);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  dt   = appctx.h/2.0;
  ierr = TSSetTimeStep(ts,dt);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set the solution method to be the backward Euler method.
       - Set timestepping duration info.
     Then set runtime options, which can override these defaults.
     For example,
         -ts_max_steps <maxsteps> -ts_max_time <maxtime>
     to override the defaults set by TSSetMaxSteps()/TSSetMaxTime().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = TSSetType(ts,TSBEULER);CHKERRQ(ierr);
  ierr = TSSetMaxSteps(ts,time_steps_max);CHKERRQ(ierr);
  ierr = TSSetMaxTime(ts,time_total_max);CHKERRQ(ierr);
  ierr = TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER);CHKERRQ(ierr);
  /* Set lower and upper bounds on the solution vector for each time step */
  ierr = TSVISetVariableBounds(ts,xl,xu);CHKERRQ(ierr);
  ierr = TSSetFromOptions(ts);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Evaluate initial conditions
  */
  ierr = InitialConditions(u,&appctx);CHKERRQ(ierr);

  /*
     Run the timestepping solver
  */
  ierr = TSSolve(ts,u);CHKERRQ(ierr);
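
  /*
     If desired, one could also query how the integration finished; a
     minimal sketch (not used in this example, same error-checking style
     as above):

         PetscReal ftime;
         PetscInt  steps;
         ierr = TSGetSolveTime(ts,&ftime);CHKERRQ(ierr);
         ierr = TSGetStepNumber(ts,&steps);CHKERRQ(ierr);
         ierr = PetscPrintf(PETSC_COMM_WORLD,"final time %g, steps %D\n",(double)ftime,steps);CHKERRQ(ierr);
  */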

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  ierr = VecDestroy(&r);CHKERRQ(ierr);
  ierr = VecDestroy(&xl);CHKERRQ(ierr);
  ierr = VecDestroy(&xu);CHKERRQ(ierr);
  ierr = TSDestroy(&ts);CHKERRQ(ierr);
  ierr = VecDestroy(&u);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = DMDestroy(&appctx.da);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.localwork);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.solution);CHKERRQ(ierr);
  ierr = VecDestroy(&appctx.u_local);CHKERRQ(ierr);

  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u,AppCtx *appctx)
{
  PetscScalar    *u_localptr,h = appctx->h,x;
  PetscInt       i,mybase,myend;
  PetscErrorCode ierr;

  /*
     Determine starting point of each processor's range of
     grid values.
  */
  ierr = VecGetOwnershipRange(u,&mybase,&myend);CHKERRQ(ierr);

  /*
     Get a pointer to vector data.
       - For default PETSc vectors, VecGetArray() returns a pointer to
         the data array. Otherwise, the routine is implementation dependent.
       - You MUST call VecRestoreArray() when you no longer need access to
         the array.
       - Note that the Fortran interface to VecGetArray() differs from the
         C version. See the users manual for details.
  */
  ierr = VecGetArray(u,&u_localptr);CHKERRQ(ierr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations. Alternatively, we could use
     VecSetValues() or VecSetValuesLocal().
  */
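  /*
     For reference, a minimal sketch of the VecSetValues() alternative
     mentioned above (with i and x as in the loop below; val is
     illustrative), followed by the usual assembly pair:

         PetscScalar val = 1.0 + x*x;
         ierr = VecSetValues(u,1,&i,&val,INSERT_VALUES);CHKERRQ(ierr);
         ...
         ierr = VecAssemblyBegin(u);CHKERRQ(ierr);
         ierr = VecAssemblyEnd(u);CHKERRQ(ierr);
  */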
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i; /* current location in global grid */
    u_localptr[i-mybase] = 1.0 + x*x;
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArray(u,&u_localptr);CHKERRQ(ierr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    ierr = PetscPrintf(appctx->comm,"initial guess vector\n");CHKERRQ(ierr);
    ierr = VecView(u,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  return 0;
}

/* --------------------------------------------------------------------- */
/*
   SetBounds - Sets the lower and upper bounds on the interior points

   Input Parameters:
   xl - vector of lower bounds
   xu - vector of upper bounds
   ul - constant lower bound for all variables
   uh - constant upper bound for all variables
   appctx - application context
*/
PetscErrorCode SetBounds(Vec xl,Vec xu,PetscScalar ul,PetscScalar uh,AppCtx *appctx)
{
  PetscErrorCode ierr;
  PetscScalar    *l,*u;
  PetscMPIInt    rank,size;
  PetscInt       localsize;

  PetscFunctionBeginUser;
  ierr = VecSet(xl,ul);CHKERRQ(ierr);
  ierr = VecSet(xu,uh);CHKERRQ(ierr);
  ierr = VecGetLocalSize(xl,&localsize);CHKERRQ(ierr);
  ierr = VecGetArray(xl,&l);CHKERRQ(ierr);
  ierr = VecGetArray(xu,&u);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(appctx->comm,&rank);CHKERRMPI(ierr);
  ierr = MPI_Comm_size(appctx->comm,&size);CHKERRMPI(ierr);
  if (!rank) {
    l[0] = -PETSC_INFINITY;
    u[0] =  PETSC_INFINITY;
  }
  if (rank == size-1) {
    l[localsize-1] = -PETSC_INFINITY;
    u[localsize-1] =  PETSC_INFINITY;
  }
  ierr = VecRestoreArray(xl,&l);CHKERRQ(ierr);
  ierr = VecRestoreArray(xu,&u);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t - current time
   solution - vector in which exact solution will be computed
   appctx - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
PetscErrorCode ExactSolution(PetscReal t,Vec solution,AppCtx *appctx)
{
  PetscScalar    *s_localptr,h = appctx->h,x;
  PetscInt       i,mybase,myend;
  PetscErrorCode ierr;

  /*
     Determine starting and ending points of each processor's
     range of grid values
  */
  ierr = VecGetOwnershipRange(solution,&mybase,&myend);CHKERRQ(ierr);

  /*
     Get a pointer to vector data.
  */
  ierr = VecGetArray(solution,&s_localptr);CHKERRQ(ierr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  for (i=mybase; i<myend; i++) {
    x = h*(PetscReal)i;
    s_localptr[i-mybase] = (t + 1.0)*(1.0 + x*x);
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArray(solution,&s_localptr);CHKERRQ(ierr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   Monitor - User-provided routine to monitor the solution computed at
   each timestep. This example plots the solution and computes the
   error in two different norms.

   Input Parameters:
   ts - the timestep context
   step - the count of the current step (with 0 meaning the
          initial condition)
   time - the current time
   u - the solution at this timestep
   ctx - the user-provided context for this monitoring routine.
         In this case we use the application context which contains
         information about the problem size, workspace and the exact
         solution.
*/
PetscErrorCode Monitor(TS ts,PetscInt step,PetscReal time,Vec u,void *ctx)
{
  AppCtx         *appctx = (AppCtx*) ctx; /* user-defined application context */
  PetscErrorCode ierr;
  PetscReal      en2,en2s,enmax;
  PetscDraw      draw;

  /*
     We use the default X-windows viewer
         PETSC_VIEWER_DRAW_(appctx->comm)
     that is associated with the current communicator. This saves
     the effort of calling PetscViewerDrawOpen() to create the window.
     Note that if we wished to plot several items in separate windows we
     would create each viewer with PetscViewerDrawOpen() and store them in
     the application context, appctx.

     Double buffering makes graphics look better.
  */
  ierr = PetscViewerDrawGetDraw(PETSC_VIEWER_DRAW_(appctx->comm),0,&draw);CHKERRQ(ierr);
  ierr = PetscDrawSetDoubleBuffer(draw);CHKERRQ(ierr);
  ierr = VecView(u,PETSC_VIEWER_DRAW_(appctx->comm));CHKERRQ(ierr);

  /*
     Compute the exact solution at this timestep
  */
  ierr = ExactSolution(time,appctx->solution,appctx);CHKERRQ(ierr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    ierr = PetscPrintf(appctx->comm,"Computed solution vector\n");CHKERRQ(ierr);
    ierr = VecView(u,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(appctx->comm,"Exact solution vector\n");CHKERRQ(ierr);
    ierr = VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /*
     Compute the 2-norm and max-norm of the error
  */
  ierr = VecAXPY(appctx->solution,-1.0,u);CHKERRQ(ierr);
  ierr = VecNorm(appctx->solution,NORM_2,&en2);CHKERRQ(ierr);
  en2s = PetscSqrtReal(appctx->h)*en2; /* scale the 2-norm by the square root of the grid spacing */
  ierr = VecNorm(appctx->solution,NORM_MAX,&enmax);CHKERRQ(ierr);
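
  /*
     The PetscSqrtReal(h) factor above makes en2s approximate the continuous
     L2 norm of the error: ||e||_{L2}^2 = int_0^1 e(x)^2 dx ~ h*sum_i e_i^2,
     hence sqrt(h)*||e||_2 ~ ||e||_{L2}.
  */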

  /*
     PetscPrintf() causes only the first processor in this
     communicator to print the timestep information.
  */
  ierr = PetscPrintf(appctx->comm,"Timestep %D: time = %g, 2-norm error = %g, max norm error = %g\n",step,(double)time,(double)en2s,(double)enmax);CHKERRQ(ierr);

  /*
     Print debugging information if desired
  */
  /* if (appctx->debug) {
     ierr = PetscPrintf(appctx->comm,"Error vector\n");CHKERRQ(ierr);
     ierr = VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
     } */
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSFunction - User-provided routine that evaluates the right-hand-side
   function of the ODE. This routine is set in the main program by
   calling TSSetRHSFunction(). We compute:
         global_out = F(global_in)

   Input Parameters:
   ts - timestepping context
   t - current time
   global_in - vector containing the current iterate
   ctx - (optional) user-provided context for function evaluation.
         In this case we use the appctx defined above.

   Output Parameter:
   global_out - vector containing the newly evaluated function
*/
PetscErrorCode RHSFunction(TS ts,PetscReal t,Vec global_in,Vec global_out,void *ctx)
{
  AppCtx            *appctx   = (AppCtx*) ctx;     /* user-defined application context */
  DM                da        = appctx->da;        /* distributed array */
  Vec               local_in  = appctx->u_local;   /* local ghosted input vector */
  Vec               localwork = appctx->localwork; /* local ghosted work vector */
  PetscErrorCode    ierr;
  PetscInt          i,localsize;
  PetscMPIInt       rank,size;
  PetscScalar       *copyptr,sc;
  const PetscScalar *localptr;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local function computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
         DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);

  /*
     Access directly the values in our local INPUT work array
  */
  ierr = VecGetArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /*
     Access directly the values in our local OUTPUT work array
  */
  ierr = VecGetArray(localwork,&copyptr);CHKERRQ(ierr);

  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));

  /*
     Evaluate our function on the nodes owned by this processor
  */
  ierr = VecGetLocalSize(local_in,&localsize);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Handle boundary conditions: This is done by using the boundary condition
         u(t,boundary) = g(t,boundary)
     for some function g. Now take the derivative with respect to t to obtain
         u_{t}(t,boundary) = g_{t}(t,boundary)

     In our case, u(t,0) = t + 1, so that u_{t}(t,0) = 1
     and u(t,1) = 2*t + 2, so that u_{t}(t,1) = 2
  */
  ierr = MPI_Comm_rank(appctx->comm,&rank);CHKERRMPI(ierr);
  ierr = MPI_Comm_size(appctx->comm,&size);CHKERRMPI(ierr);
  if (!rank)          copyptr[0] = 1.0;
  if (rank == size-1) copyptr[localsize-1] = (t < .5) ? 2.0 : 0.0;

  /*
     Handle the interior nodes where the PDE is replaced by finite
     difference operators.
  */
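  /*
     With the centered difference u_xx ~ (u_{i+1} - 2*u_i + u_{i-1})/h^2
     and sc = 1/(2*h^2*(1+t)^2) as set above, the i-th component of the
     right-hand-side function is
         F_i(u) = sc * u_i * (u_{i+1} + u_{i-1} - 2*u_i),
     which is what the loop below computes.
  */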
  for (i=1; i<localsize-1; i++) copyptr[i] = localptr[i] * sc * (localptr[i+1] + localptr[i-1] - 2.0*localptr[i]);

  /*
     Restore vectors
  */
  ierr = VecRestoreArrayRead(local_in,&localptr);CHKERRQ(ierr);
  ierr = VecRestoreArray(localwork,&copyptr);CHKERRQ(ierr);

  /*
     Insert values from the local OUTPUT vector into the global
     output vector
  */
  ierr = DMLocalToGlobalBegin(da,localwork,INSERT_VALUES,global_out);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,localwork,INSERT_VALUES,global_out);CHKERRQ(ierr);

  /* Print debugging information if desired */
  /* if (appctx->debug) {
     ierr = PetscPrintf(appctx->comm,"RHS function vector\n");CHKERRQ(ierr);
     ierr = VecView(global_out,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
     } */

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSJacobian - User-provided routine to compute the Jacobian of
   the nonlinear right-hand-side function of the ODE.

   Input Parameters:
   ts - the TS context
   t - current time
   global_in - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   B - optionally different matrix from which the preconditioner is built

   Notes:
   RHSJacobian computes entries for the locally owned part of the Jacobian.
    - Currently, all PETSc parallel matrix formats are partitioned by
      contiguous chunks of rows across the processors.
    - Each processor needs to insert only elements that it owns
      locally (but any non-local elements will be sent to the
      appropriate processor during matrix assembly).
    - Always specify global rows and columns of matrix entries when
      using MatSetValues().
    - Here, we set all entries for a particular row at once.
    - Note that MatSetValues() uses 0-based row and column numbers
      in Fortran as well as in C.
*/
PetscErrorCode RHSJacobian(TS ts,PetscReal t,Vec global_in,Mat AA,Mat B,void *ctx)
{
  AppCtx            *appctx  = (AppCtx*)ctx;    /* user-defined application context */
  Vec               local_in = appctx->u_local; /* local ghosted input vector */
  DM                da       = appctx->da;      /* distributed array */
  PetscScalar       v[3],sc;
  const PetscScalar *localptr;
  PetscErrorCode    ierr;
  PetscInt          i,mstart,mend,mstarts,mends,idx[3],is;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local Jacobian computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
         DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);CHKERRQ(ierr);

  /*
     Get pointer to vector data
  */
  ierr = VecGetArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /*
     Get starting and ending locally owned rows of the matrix
  */
  ierr   = MatGetOwnershipRange(B,&mstarts,&mends);CHKERRQ(ierr);
  mstart = mstarts; mend = mends;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Here, we set all entries for a particular row at once.
      - We can set matrix entries using either MatSetValuesLocal()
        or MatSetValues().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set matrix rows corresponding to boundary data
  */
  if (mstart == 0) {
    v[0] = 0.0;
    ierr = MatSetValues(B,1,&mstart,1,&mstart,v,INSERT_VALUES);CHKERRQ(ierr);
    mstart++;
  }
  if (mend == appctx->m) {
    mend--;
    v[0] = 0.0;
    ierr = MatSetValues(B,1,&mend,1,&mend,v,INSERT_VALUES);CHKERRQ(ierr);
  }

  /*
     Set matrix rows corresponding to interior data. We construct the
     matrix one row at a time.
  */
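  /*
     Differentiating F_i(u) = sc * u_i * (u_{i+1} + u_{i-1} - 2*u_i) gives
     the three nonzero Jacobian entries in row i:
         dF_i/du_{i-1} = sc * u_i
         dF_i/du_i     = sc * (u_{i+1} + u_{i-1} - 4*u_i)
         dF_i/du_{i+1} = sc * u_i
     These are the values v[0], v[1], v[2] inserted below; is is the local
     (ghosted) index corresponding to global row i.
  */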
  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
  for (i=mstart; i<mend; i++) {
    idx[0] = i-1; idx[1] = i; idx[2] = i+1;
    is     = i - mstart + 1;
    v[0]   = sc*localptr[is];
    v[1]   = sc*(localptr[is+1] + localptr[is-1] - 4.0*localptr[is]);
    v[2]   = sc*localptr[is];
    ierr   = MatSetValues(B,1,&i,3,idx,v,INSERT_VALUES);CHKERRQ(ierr);
  }

  /*
     Restore vector
  */
  ierr = VecRestoreArrayRead(local_in,&localptr);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
         MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix. If we do, it will generate an error.
  */
  ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);

  return 0;
}

/*TEST

   test:
      args: -snes_type vinewtonrsls -ts_type glee -mymonitor -ts_max_steps 10 -nox
      requires: !single

TEST*/