xref: /petsc/src/ts/tutorials/autodiff/adr_ex5adj.cxx (revision ccb4e88a40f0b86eaeca07ff64c64e4de2fae686)
1 static char help[] = "Demonstrates adjoint sensitivity analysis for Reaction-Diffusion Equations.\n";
2 
3 /*
4    Concepts: TS^time-dependent nonlinear problems
5    Concepts: Automatic differentiation using ADOL-C
6 */
7 /*
8    REQUIRES configuration of PETSc with option --download-adolc.
9 
10    For documentation on ADOL-C, see
11      $PETSC_ARCH/externalpackages/ADOL-C-2.6.0/ADOL-C/doc/adolc-manual.pdf
12 
13    REQUIRES configuration of PETSc with option --download-colpack
14 
15    For documentation on ColPack, see
16      $PETSC_ARCH/externalpackages/git.colpack/README.md
17 */
18 /* ------------------------------------------------------------------------
19   See ../advection-diffusion-reaction/ex5 for a description of the problem
20   ------------------------------------------------------------------------- */
21 
22 /*
23   Runtime options:
24 
25     Solver:
26       -forwardonly       - Run the forward simulation without adjoint.
27       -implicitform      - Provide IFunction and IJacobian to TS, if not set, RHSFunction and RHSJacobian will be used.
28       -aijpc             - Set the preconditioner matrix to be aij (the Jacobian matrix can be of a different type such as ELL).
29 
30     Jacobian generation:
31       -jacobian_by_hand  - Use the hand-coded Jacobian of ex5.c, rather than generating it automatically.
32       -no_annotation     - Do not annotate ADOL-C active variables. (Should be used alongside -jacobian_by_hand.)
33       -adolc_sparse      - Calculate Jacobian in compressed format, using ADOL-C sparse functionality.
34       -adolc_sparse_view - Print sparsity pattern used by -adolc_sparse option.
35  */
36 /*
37   NOTE: If -adolc_sparse option is used, at least four processors should be used, so that no processor owns boundaries which are
38         identified by the periodic boundary conditions. The number of grid points in both x- and y-directions should be multiples
39         of 5, in order for the 5-point stencil to be cleanly parallelised.
40 */
41 
42 #include <petscdmda.h>
43 #include <petscts.h>
44 #include "adolc-utils/drivers.cxx"
45 #include <adolc/adolc.h>
46 
/* (Passive) field for the two variables: the two chemical concentrations
   u and v of the reaction-diffusion system (see ../advection-diffusion-reaction/ex5) */
typedef struct {
  PetscScalar u,v;
} Field;
51 
/* Active field for the two variables: same layout as Field, but with ADOL-C
   active (taped) scalars so derivative information can be recorded */
typedef struct {
  adouble u,v;
} AField;
56 
/* Application context, passed (as void*) to all TS callbacks */
typedef struct {
  PetscReal D1,D2,gamma,kappa; /* D1,D2: diffusion coefficients for u,v; gamma,kappa: reaction parameters (see residual evaluations below) */
  AField    **u_a,**f_a;       /* NOTE(review): active-field work arrays; not referenced in this file's visible code — presumably used elsewhere, verify before removing */
  PetscBool aijpc;             /* If true, use an AIJ preconditioner matrix distinct from the Jacobian matrix type */
  AdolcCtx  *adctx;            /* Automatic differentiation support */
} AppCtx;
64 
65 extern PetscErrorCode InitialConditions(DM da,Vec U);
66 extern PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y);
67 extern PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr);
68 extern PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr);
69 extern PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
70 extern PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
71 extern PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
72 extern PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
73 extern PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
74 extern PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
75 
/*
  Set up the DMDA grid and TS integrator, trace the residual evaluation with
  ADOL-C (unless -no_annotation) so the Jacobian callbacks can generate
  Jacobians automatically, run the forward solve and then, unless
  -forwardonly, the adjoint solve.
*/
int main(int argc,char **argv)
{
  TS             ts;
  Vec            x,r,xdot;
  PetscErrorCode ierr;
  DM             da;
  AppCtx         appctx;
  AdolcCtx       *adctx;
  Vec            lambda[1];
  PetscBool      forwardonly=PETSC_FALSE,implicitform=PETSC_FALSE,byhand=PETSC_FALSE;
  PetscInt       gxm,gym,i,dofs = 2,ctrl[3] = {0,0,0};
  PetscScalar    **Seed = NULL,**Rec = NULL,*u_vec;
  unsigned int   **JP = NULL;
  ISColoring     iscoloring;

  ierr = PetscInitialize(&argc,&argv,NULL,help);if (ierr) return ierr;
  ierr = PetscNew(&adctx);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-forwardonly",&forwardonly,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-implicitform",&implicitform,NULL);CHKERRQ(ierr);
  /* Default all flags to PETSC_FALSE; the options queried below may flip them */
  appctx.aijpc = PETSC_FALSE,adctx->no_an = PETSC_FALSE,adctx->sparse = PETSC_FALSE,adctx->sparse_view = PETSC_FALSE;adctx->sparse_view_done = PETSC_FALSE;
  ierr = PetscOptionsGetBool(NULL,NULL,"-aijpc",&appctx.aijpc,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-no_annotation",&adctx->no_an,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-jacobian_by_hand",&byhand,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-adolc_sparse",&adctx->sparse,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsGetBool(NULL,NULL,"-adolc_sparse_view",&adctx->sparse_view,NULL);CHKERRQ(ierr);
  /* Model parameters: D1,D2 diffuse u,v respectively; gamma,kappa enter the reaction terms */
  appctx.D1    = 8.0e-5;
  appctx.D2    = 4.0e-5;
  appctx.gamma = .024;
  appctx.kappa = .06;
  appctx.adctx = adctx;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create distributed array (DMDA) to manage parallel grid and vectors
  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_PERIODIC,DM_BOUNDARY_PERIODIC,DMDA_STENCIL_STAR,65,65,PETSC_DECIDE,PETSC_DECIDE,2,1,NULL,NULL,&da);CHKERRQ(ierr);
  ierr = DMSetFromOptions(da);CHKERRQ(ierr);
  ierr = DMSetUp(da);CHKERRQ(ierr);
  ierr = DMDASetFieldName(da,0,"u");CHKERRQ(ierr);
  ierr = DMDASetFieldName(da,1,"v");CHKERRQ(ierr);

  /*  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Extract global vectors from DMDA; then duplicate for remaining
     vectors that are the same types
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = DMCreateGlobalVector(da,&x);CHKERRQ(ierr);
  ierr = VecDuplicate(x,&r);CHKERRQ(ierr);
  ierr = VecDuplicate(x,&xdot);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr);
  ierr = TSSetType(ts,TSCN);CHKERRQ(ierr);
  ierr = TSSetDM(ts,da);CHKERRQ(ierr);
  ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr);
  if (!implicitform) {
    ierr = TSSetRHSFunction(ts,NULL,RHSFunctionPassive,&appctx);CHKERRQ(ierr);
  } else {
    ierr = DMDATSSetIFunctionLocal(da,INSERT_VALUES,(DMDATSIFunctionLocal)IFunctionLocalPassive,&appctx);CHKERRQ(ierr);
  }

  if (!adctx->no_an) {
    ierr = DMDAGetGhostCorners(da,NULL,NULL,NULL,&gxm,&gym,NULL);CHKERRQ(ierr);
    adctx->m = dofs*gxm*gym;adctx->n = dofs*gxm*gym; /* Number of dependent and independent variables (ghosted local sizes) */

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
       Trace function(s) just once: tape 1 recorded here is replayed by the
       ADOL-C Jacobian drivers at every timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    if (!implicitform) {
      ierr = RHSFunctionActive(ts,1.0,x,r,&appctx);CHKERRQ(ierr);
    } else {
      ierr = IFunctionActive(ts,1.0,x,xdot,r,&appctx);CHKERRQ(ierr);
    }

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
      In the case where ADOL-C generates the Jacobian in compressed format,
      seed and recovery matrices are required. Since the sparsity structure
      of the Jacobian does not change over the course of the time
      integration, we can save computational effort by only generating
      these objects once.
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    if (adctx->sparse) {
      /*
         Generate sparsity pattern

         One way to do this is to use the Jacobian pattern driver `jac_pat`
         provided by ColPack. Otherwise, it is the responsibility of the user
         to obtain the sparsity pattern.
      */
      ierr = PetscMalloc1(adctx->n,&u_vec);CHKERRQ(ierr);
      JP = (unsigned int **) malloc(adctx->m*sizeof(unsigned int*));
      jac_pat(1,adctx->m,adctx->n,u_vec,JP,ctrl);
      if (adctx->sparse_view) {
        /* NOTE(review): MPI_COMM_WORLD is valid here, but PETSc convention is
           PETSC_COMM_WORLD (which respects any user-set communicator) — confirm */
        ierr = PrintSparsity(MPI_COMM_WORLD,adctx->m,JP);CHKERRQ(ierr);
      }

      /*
        Extract a column colouring

        For problems using DMDA, colourings can be extracted directly, as
        follows. There are also ColPack drivers available. Otherwise, it is the
        responsibility of the user to obtain a suitable colouring.
      */
      ierr = DMCreateColoring(da,IS_COLORING_LOCAL,&iscoloring);CHKERRQ(ierr);
      ierr = ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&adctx->p,NULL);CHKERRQ(ierr);

      /* Generate seed matrix to propagate through the forward mode of AD */
      ierr = AdolcMalloc2(adctx->n,adctx->p,&Seed);CHKERRQ(ierr);
      ierr = GenerateSeedMatrix(iscoloring,Seed);CHKERRQ(ierr);
      ierr = ISColoringDestroy(&iscoloring);CHKERRQ(ierr);

      /*
        Generate recovery matrix, which is used to recover the Jacobian from
        compressed format */
      ierr = AdolcMalloc2(adctx->m,adctx->p,&Rec);CHKERRQ(ierr);
      ierr = GetRecoveryMatrix(Seed,JP,adctx->m,adctx->p,Rec);CHKERRQ(ierr);

      /* Store results and free workspace; JP rows were malloc'd by jac_pat, so free() (not PetscFree) */
      adctx->Rec = Rec;
      for (i=0;i<adctx->m;i++)
        free(JP[i]);
      free(JP);
      ierr = PetscFree(u_vec);CHKERRQ(ierr);

    } else {

      /*
        In 'full' Jacobian mode, propagate an identity matrix through the
        forward mode of AD.
      */
      adctx->p = adctx->n;
      ierr = AdolcMalloc2(adctx->n,adctx->p,&Seed);CHKERRQ(ierr);
      ierr = Identity(adctx->n,Seed);CHKERRQ(ierr);
    }
    adctx->Seed = Seed;
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set Jacobian
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  if (!implicitform) {
    if (!byhand) {
      ierr = TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianAdolc,&appctx);CHKERRQ(ierr);
    } else {
      ierr = TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianByHand,&appctx);CHKERRQ(ierr);
    }
  } else {
    if (appctx.aijpc) {
      Mat A,B;

      /* Jacobian in SELL format, preconditioner in AIJ (exercises mixed matrix types) */
      ierr = DMSetMatType(da,MATSELL);CHKERRQ(ierr);
      ierr = DMCreateMatrix(da,&A);CHKERRQ(ierr);
      ierr = MatConvert(A,MATAIJ,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
      /* FIXME do we need to change viewer to display matrix in natural ordering as DMCreateMatrix_DA does? */
      if (!byhand) {
        ierr = TSSetIJacobian(ts,A,B,IJacobianAdolc,&appctx);CHKERRQ(ierr);
      } else {
        ierr = TSSetIJacobian(ts,A,B,IJacobianByHand,&appctx);CHKERRQ(ierr);
      }
      /* TS holds its own references, so the local ones may be dropped now */
      ierr = MatDestroy(&A);CHKERRQ(ierr);
      ierr = MatDestroy(&B);CHKERRQ(ierr);
    } else {
      if (!byhand) {
        ierr = TSSetIJacobian(ts,NULL,NULL,IJacobianAdolc,&appctx);CHKERRQ(ierr);
      } else {
        ierr = TSSetIJacobian(ts,NULL,NULL,IJacobianByHand,&appctx);CHKERRQ(ierr);
      }
    }
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set initial conditions
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = InitialConditions(da,x);CHKERRQ(ierr);
  ierr = TSSetSolution(ts,x);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Have the TS save its trajectory so that TSAdjointSolve() may be used
    and set solver options
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  if (!forwardonly) {
    ierr = TSSetSaveTrajectory(ts);CHKERRQ(ierr);
    ierr = TSSetMaxTime(ts,200.0);CHKERRQ(ierr);
    ierr = TSSetTimeStep(ts,0.5);CHKERRQ(ierr);
  } else {
    ierr = TSSetMaxTime(ts,2000.0);CHKERRQ(ierr);
    ierr = TSSetTimeStep(ts,10);CHKERRQ(ierr);
  }
  ierr = TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER);CHKERRQ(ierr);
  ierr = TSSetFromOptions(ts);CHKERRQ(ierr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve ODE system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = TSSolve(ts,x);CHKERRQ(ierr);
  if (!forwardonly) {
    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
       Start the Adjoint model
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
    ierr = VecDuplicate(x,&lambda[0]);CHKERRQ(ierr);
    /*   Reset initial conditions for the adjoint integration */
    ierr = InitializeLambda(da,lambda[0],0.5,0.5);CHKERRQ(ierr);
    ierr = TSSetCostGradients(ts,1,lambda,NULL);CHKERRQ(ierr);
    ierr = TSAdjointSolve(ts);CHKERRQ(ierr);
    ierr = VecDestroy(&lambda[0]);CHKERRQ(ierr);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  ierr = VecDestroy(&xdot);CHKERRQ(ierr);
  ierr = VecDestroy(&r);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = TSDestroy(&ts);CHKERRQ(ierr);
  if (!adctx->no_an) {
    if (adctx->sparse) {ierr = AdolcFree2(Rec);CHKERRQ(ierr);}
    ierr = AdolcFree2(Seed);CHKERRQ(ierr);
  }
  ierr = DMDestroy(&da);CHKERRQ(ierr);
  ierr = PetscFree(adctx);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
299 
300 PetscErrorCode InitialConditions(DM da,Vec U)
301 {
302   PetscErrorCode ierr;
303   PetscInt       i,j,xs,ys,xm,ym,Mx,My;
304   Field          **u;
305   PetscReal      hx,hy,x,y;
306 
307   PetscFunctionBegin;
308   ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
309 
310   hx = 2.5/(PetscReal)Mx;
311   hy = 2.5/(PetscReal)My;
312 
313   /*
314      Get pointers to vector data
315   */
316   ierr = DMDAVecGetArray(da,U,&u);CHKERRQ(ierr);
317 
318   /*
319      Get local grid boundaries
320   */
321   ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
322 
323   /*
324      Compute function over the locally owned part of the grid
325   */
326   for (j=ys; j<ys+ym; j++) {
327     y = j*hy;
328     for (i=xs; i<xs+xm; i++) {
329       x = i*hx;
330       if (PetscGTE(x,1.0) && PetscLTE(x,1.5) && PetscGTE(y,1.0) && PetscLTE(y,1.5)) u[j][i].v = PetscPowReal(PetscSinReal(4.0*PETSC_PI*x),2.0)*PetscPowReal(PetscSinReal(4.0*PETSC_PI*y),2.0)/4.0;
331       else u[j][i].v = 0.0;
332 
333       u[j][i].u = 1.0 - 2.0*u[j][i].v;
334     }
335   }
336 
337   /*
338      Restore vectors
339   */
340   ierr = DMDAVecRestoreArray(da,U,&u);CHKERRQ(ierr);
341   PetscFunctionReturn(0);
342 }
343 
344 PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y)
345 {
346    PetscInt i,j,Mx,My,xs,ys,xm,ym;
347    PetscErrorCode ierr;
348    Field **l;
349    PetscFunctionBegin;
350 
351    ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
352    /* locate the global i index for x and j index for y */
353    i = (PetscInt)(x*(Mx-1));
354    j = (PetscInt)(y*(My-1));
355    ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
356 
357    if (xs <= i && i < xs+xm && ys <= j && j < ys+ym) {
358      /* the i,j vertex is on this process */
359      ierr = DMDAVecGetArray(da,lambda,&l);CHKERRQ(ierr);
360      l[j][i].u = 1.0;
361      l[j][i].v = 1.0;
362      ierr = DMDAVecRestoreArray(da,lambda,&l);CHKERRQ(ierr);
363    }
364    PetscFunctionReturn(0);
365 }
366 
367 PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr)
368 {
369   AppCtx         *appctx = (AppCtx*)ptr;
370   PetscInt       i,j,xs,ys,xm,ym;
371   PetscReal      hx,hy,sx,sy;
372   PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
373   PetscErrorCode ierr;
374 
375   PetscFunctionBegin;
376   hx = 2.50/(PetscReal)(info->mx); sx = 1.0/(hx*hx);
377   hy = 2.50/(PetscReal)(info->my); sy = 1.0/(hy*hy);
378 
379   /* Get local grid boundaries */
380   xs = info->xs; xm = info->xm; ys = info->ys; ym = info->ym;
381 
382   /* Compute function over the locally owned part of the grid */
383   for (j=ys; j<ys+ym; j++) {
384     for (i=xs; i<xs+xm; i++) {
385       uc        = u[j][i].u;
386       uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
387       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
388       vc        = u[j][i].v;
389       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
390       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
391       f[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
392       f[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
393     }
394   }
395   ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);
396   PetscFunctionReturn(0);
397 }
398 
/*
  Implicit residual evaluation traced by ADOL-C on tape 1 so the Jacobian can
  later be generated automatically. Computes F = Udot - RHS(U) over the owned
  grid; ghosted inputs are marked independent (<<=) and all ghosted outputs
  dependent (>>=). Called once from main() before time stepping.
*/
PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr)
{
  PetscErrorCode ierr;
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  DMDALocalInfo  info;
  Field          **u,**f,**udot;
  Vec            localU;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym;
  PetscReal      hx,hy,sx,sy;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  AField         **f_a,*f_c,**u_a,*u_c;
  PetscScalar    dummy; /* sink for dependent values at ghost points (see below) */

  PetscFunctionBegin;

  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMDAGetLocalInfo(da,&info);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  /* Scaled inverse grid spacings for the 5-point stencil */
  hx = 2.50/(PetscReal)(info.mx); sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(info.my); sy = 1.0/(hy*hy);
  xs = info.xs; xm = info.xm; gxs = info.gxs; gxm = info.gxm;
  ys = info.ys; ym = info.ym; gys = info.gys; gym = info.gym;

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(da,F,&f);CHKERRQ(ierr);
  ierr = DMDAVecGetArrayRead(da,Udot,&udot);CHKERRQ(ierr);

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[info.gxm*info.gym];
  f_c = new AField[info.gxm*info.gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[info.gym];
  f_a = new AField*[info.gym];

  /* Align indices between array types to endow 2d array with ghost points */
  ierr = GiveGhostPoints(da,u_c,&u_a);CHKERRQ(ierr);
  ierr = GiveGhostPoints(da,f_c,&f_a);CHKERRQ(ierr);

  trace_on(1);  /* Start of active section on tape 1 */

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /* Compute function over the locally owned part of the grid (same stencil
     arithmetic as IFunctionLocalPassive, but on active variables) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc        = u_a[j][i].u;
      uxx       = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy       = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc        = u_a[j][i].v;
      vxx       = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy       = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
      f_a[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
    }
  }

  /*
    Mark dependence

    NOTE: Marking dependence of dummy variables makes the index notation much simpler when forming
          the Jacobian later. Ghost rows have no residual of their own, so their outputs are
          routed into the throwaway scalar `dummy`.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      if ((i < xs) || (i >= xs+xm) || (j < ys) || (j >= ys+ym)) {
        f_a[j][i].u >>= dummy;
        f_a[j][i].v >>= dummy;
      } else {
        f_a[j][i].u >>= f[j][i].u;
        f_a[j][i].v >>= f[j][i].v;
      }
    }
  }
  trace_off();  /* End of active section */
  ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);

  /* Restore vectors */
  ierr = DMDAVecRestoreArray(da,F,&f);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,Udot,&udot);CHKERRQ(ierr);

  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);

  /* Destroy AFields appropriately: GiveGhostPoints offset the row pointers by
     -gys, so shift them back before delete[] to free the original allocations */
  f_a += info.gys;
  u_a += info.gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
523 
524 PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
525 {
526   AppCtx         *appctx = (AppCtx*)ptr;
527   DM             da;
528   PetscErrorCode ierr;
529   PetscInt       i,j,xs,ys,xm,ym,Mx,My;
530   PetscReal      hx,hy,sx,sy;
531   PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
532   Field          **u,**f;
533   Vec            localU,localF;
534 
535   PetscFunctionBegin;
536   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
537   ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
538   hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
539   hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
540   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
541   ierr = DMGetLocalVector(da,&localF);CHKERRQ(ierr);
542 
543   /*
544      Scatter ghost points to local vector,using the 2-step process
545         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
546      By placing code between these two statements, computations can be
547      done while messages are in transition.
548   */
549   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
550   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
551   ierr = VecZeroEntries(F);CHKERRQ(ierr); // NOTE (1): See (2) below
552   ierr = DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);
553   ierr = DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);
554 
555   /*
556      Get pointers to vector data
557   */
558   ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
559   ierr = DMDAVecGetArray(da,localF,&f);CHKERRQ(ierr);
560 
561   /*
562      Get local grid boundaries
563   */
564   ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
565 
566   /*
567      Compute function over the locally owned part of the grid
568   */
569   for (j=ys; j<ys+ym; j++) {
570     for (i=xs; i<xs+xm; i++) {
571       uc        = u[j][i].u;
572       uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
573       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
574       vc        = u[j][i].v;
575       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
576       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
577       f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
578       f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
579     }
580   }
581 
582   /*
583      Gather global vector, using the 2-step process
584         DMLocalToGlobalBegin(),DMLocalToGlobalEnd().
585 
586      NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
587                DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
588   */
589   ierr = DMLocalToGlobalBegin(da,localF,ADD_VALUES,F);CHKERRQ(ierr);
590   ierr = DMLocalToGlobalEnd(da,localF,ADD_VALUES,F);CHKERRQ(ierr);
591 
592   /*
593      Restore vectors
594   */
595   ierr = DMDAVecRestoreArray(da,localF,&f);CHKERRQ(ierr);
596   ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
597   ierr = DMRestoreLocalVector(da,&localF);CHKERRQ(ierr);
598   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
599   ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);
600   PetscFunctionReturn(0);
601 }
602 
/*
  Explicit right-hand side evaluation traced by ADOL-C on tape 1 (only one of
  RHSFunctionActive / IFunctionActive is ever traced, selected in main() by
  -implicitform). Ghosted inputs are marked independent (<<=) and ghosted
  outputs dependent (>>=); called once from main() before time stepping.
*/
PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
{
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  PetscErrorCode ierr;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym,Mx,My;
  PetscReal      hx,hy,sx,sy;
  AField         **f_a,*f_c,**u_a,*u_c;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  Field          **u,**f;
  Vec            localU,localF;

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
  /* Scaled inverse grid spacings for the 5-point stencil */
  hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localF);CHKERRQ(ierr);

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = VecZeroEntries(F);CHKERRQ(ierr); /* NOTE (1): zero F before the ADD_VALUES gather; see (2) below */
  ierr = DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(da,localF,&f);CHKERRQ(ierr);

  /*
     Get local and ghosted grid boundaries
  */
  ierr = DMDAGetGhostCorners(da,&gxs,&gys,NULL,&gxm,&gym,NULL);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[gxm*gym];
  f_c = new AField[gxm*gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[gym];
  f_a = new AField*[gym];

  /* Align indices between array types to endow 2d array with ghost points */
  ierr = GiveGhostPoints(da,u_c,&u_a);CHKERRQ(ierr);
  ierr = GiveGhostPoints(da,f_c,&f_a);CHKERRQ(ierr);

  /*
     Compute function over the locally owned part of the grid
  */
  trace_on(1);  // ----------------------------------------------- Start of active section

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /*
    Compute function over the locally owned part of the grid (same stencil
    arithmetic as RHSFunctionPassive, but on active variables)
  */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc          = u_a[j][i].u;
      uxx         = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy         = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc          = u_a[j][i].v;
      vxx         = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy         = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
      f_a[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
    }
  }
  /*
    Mark dependence

    NOTE: Ghost points are marked as dependent in order to vastly simplify index notation
          during Jacobian assembly.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      f_a[j][i].u >>= f[j][i].u;
      f_a[j][i].v >>= f[j][i].v;
    }
  }
  trace_off();  // ----------------------------------------------- End of active section

  /* Test zeroth order scalar evaluation in ADOL-C gives the same result */
//  if (appctx->adctx->zos) {
//    ierr = TestZOS2d(da,f,u,appctx);CHKERRQ(ierr); // FIXME: Why does this give nonzero?
//  }

  /*
     Gather global vector, using the 2-step process
        DMLocalToGlobalBegin(),DMLocalToGlobalEnd().

     NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
               DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
  */
  ierr = DMLocalToGlobalBegin(da,localF,ADD_VALUES,F);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,localF,ADD_VALUES,F);CHKERRQ(ierr);

  /*
     Restore vectors
  */
  ierr = DMDAVecRestoreArray(da,localF,&f);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localF);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);

  /* Destroy AFields appropriately: GiveGhostPoints offset the row pointers by
     -gys, so shift them back before delete[] to free the original allocations */
  f_a += gys;
  u_a += gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
746 
747 PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
748 {
749   AppCtx            *appctx = (AppCtx*)ctx;
750   DM                da;
751   PetscErrorCode    ierr;
752   const PetscScalar *u_vec;
753   Vec               localU;
754 
755   PetscFunctionBegin;
756   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
757   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
758 
759   /*
760      Scatter ghost points to local vector,using the 2-step process
761         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
762      By placing code between these two statements, computations can be
763      done while messages are in transition.
764   */
765   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
766   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
767 
768   /* Get pointers to vector data */
769   ierr = VecGetArrayRead(localU,&u_vec);CHKERRQ(ierr);
770 
771   /*
772     Compute Jacobian
773   */
774   ierr = PetscAdolcComputeIJacobianLocalIDMass(1,A,u_vec,a,appctx->adctx);CHKERRQ(ierr);
775 
776   /*
777      Restore vectors
778   */
779   ierr = VecRestoreArrayRead(localU,&u_vec);CHKERRQ(ierr);
780   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
781   PetscFunctionReturn(0);
782 }
783 
784 PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
785 {
786   AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
787   DM             da;
788   PetscErrorCode ierr;
789   PetscInt       i,j,Mx,My,xs,ys,xm,ym;
790   PetscReal      hx,hy,sx,sy;
791   PetscScalar    uc,vc;
792   Field          **u;
793   Vec            localU;
794   MatStencil     stencil[6],rowstencil;
795   PetscScalar    entries[6];
796 
797   PetscFunctionBegin;
798   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
799   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
800   ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
801 
802   hx = 2.50/(PetscReal)Mx; sx = 1.0/(hx*hx);
803   hy = 2.50/(PetscReal)My; sy = 1.0/(hy*hy);
804 
805   /*
806      Scatter ghost points to local vector,using the 2-step process
807         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
808      By placing code between these two statements, computations can be
809      done while messages are in transition.
810   */
811   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
812   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
813 
814   /*
815      Get pointers to vector data
816   */
817   ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
818 
819   /*
820      Get local grid boundaries
821   */
822   ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
823 
824   stencil[0].k = 0;
825   stencil[1].k = 0;
826   stencil[2].k = 0;
827   stencil[3].k = 0;
828   stencil[4].k = 0;
829   stencil[5].k = 0;
830   rowstencil.k = 0;
831   /*
832      Compute function over the locally owned part of the grid
833   */
834   for (j=ys; j<ys+ym; j++) {
835 
836     stencil[0].j = j-1;
837     stencil[1].j = j+1;
838     stencil[2].j = j;
839     stencil[3].j = j;
840     stencil[4].j = j;
841     stencil[5].j = j;
842     rowstencil.k = 0; rowstencil.j = j;
843     for (i=xs; i<xs+xm; i++) {
844       uc = u[j][i].u;
845       vc = u[j][i].v;
846 
847       /*      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
848       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
849 
850       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
851       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
852        f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);*/
853 
854       stencil[0].i = i; stencil[0].c = 0; entries[0] = -appctx->D1*sy;
855       stencil[1].i = i; stencil[1].c = 0; entries[1] = -appctx->D1*sy;
856       stencil[2].i = i-1; stencil[2].c = 0; entries[2] = -appctx->D1*sx;
857       stencil[3].i = i+1; stencil[3].c = 0; entries[3] = -appctx->D1*sx;
858       stencil[4].i = i; stencil[4].c = 0; entries[4] = 2.0*appctx->D1*(sx + sy) + vc*vc + appctx->gamma + a;
859       stencil[5].i = i; stencil[5].c = 1; entries[5] = 2.0*uc*vc;
860       rowstencil.i = i; rowstencil.c = 0;
861 
862       ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
863       if (appctx->aijpc) {
864         ierr = MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
865       }
866       stencil[0].c = 1; entries[0] = -appctx->D2*sy;
867       stencil[1].c = 1; entries[1] = -appctx->D2*sy;
868       stencil[2].c = 1; entries[2] = -appctx->D2*sx;
869       stencil[3].c = 1; entries[3] = -appctx->D2*sx;
870       stencil[4].c = 1; entries[4] = 2.0*appctx->D2*(sx + sy) - 2.0*uc*vc + appctx->gamma + appctx->kappa + a;
871       stencil[5].c = 0; entries[5] = -vc*vc;
872 
873       ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
874       if (appctx->aijpc) {
875         ierr = MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
876       }
877       /* f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
878     }
879   }
880 
881   /*
882      Restore vectors
883   */
884   ierr = PetscLogFlops(19.0*xm*ym);CHKERRQ(ierr);
885   ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
886   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
887   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
888   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
889   ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
890   if (appctx->aijpc) {
891     ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
892     ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
893     ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
894   }
895   PetscFunctionReturn(0);
896 }
897 
/*
  Hand-coded Jacobian of the RHS function for the reaction-diffusion system on
  a periodic 2D DMDA with two degrees of freedom (u,v) per node. The entries
  are d(RHS)/dU (no shift term, in contrast to IJacobianByHand).
*/
PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
{
  AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
  DM             da;
  PetscErrorCode ierr;
  PetscInt       i,j,Mx,My,xs,ys,xm,ym;
  PetscReal      hx,hy,sx,sy;
  PetscScalar    uc,vc;
  Field          **u;
  Vec            localU;
  MatStencil     stencil[6],rowstencil;
  PetscScalar    entries[6];

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);

  /* Grid spacing on the 2.5 x 2.5 domain; sx,sy are the 1/h^2 scalings */
  hx = 2.50/(PetscReal)(Mx); sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My); sy = 1.0/(hy*hy);

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);

  /*
     Get local grid boundaries
  */
  ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);

  /* The problem is 2D, so the k (z) index of every stencil entry is zero */
  stencil[0].k = 0;
  stencil[1].k = 0;
  stencil[2].k = 0;
  stencil[3].k = 0;
  stencil[4].k = 0;
  stencil[5].k = 0;
  rowstencil.k = 0;

  /*
     Compute function over the locally owned part of the grid.
     Stencil layout: [0]=south, [1]=north, [2]=west, [3]=east,
     [4]=centre (same component), [5]=centre (other component).
  */
  for (j=ys; j<ys+ym; j++) {

    stencil[0].j = j-1;
    stencil[1].j = j+1;
    stencil[2].j = j;
    stencil[3].j = j;
    stencil[4].j = j;
    stencil[5].j = j;
    rowstencil.k = 0; rowstencil.j = j;  /* k already 0; reassignment is redundant but harmless */
    for (i=xs; i<xs+xm; i++) {
      uc = u[j][i].u;
      vc = u[j][i].v;

      /* Derivatives of
         f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
         where uxx = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx and
               uyy = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy (likewise vxx,vyy). */

      /* u-equation row */
      stencil[0].i = i; stencil[0].c = 0; entries[0] = appctx->D1*sy;
      stencil[1].i = i; stencil[1].c = 0; entries[1] = appctx->D1*sy;
      stencil[2].i = i-1; stencil[2].c = 0; entries[2] = appctx->D1*sx;
      stencil[3].i = i+1; stencil[3].c = 0; entries[3] = appctx->D1*sx;
      stencil[4].i = i; stencil[4].c = 0; entries[4] = -2.0*appctx->D1*(sx + sy) - vc*vc - appctx->gamma;
      stencil[5].i = i; stencil[5].c = 1; entries[5] = -2.0*uc*vc;
      rowstencil.i = i; rowstencil.c = 0;

      ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);

      /* v-equation row: derivatives of
         f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
      stencil[0].c = 1; entries[0] = appctx->D2*sy;
      stencil[1].c = 1; entries[1] = appctx->D2*sy;
      stencil[2].c = 1; entries[2] = appctx->D2*sx;
      stencil[3].c = 1; entries[3] = appctx->D2*sx;
      stencil[4].c = 1; entries[4] = -2.0*appctx->D2*(sx + sy) + 2.0*uc*vc - appctx->gamma - appctx->kappa;
      stencil[5].c = 0; entries[5] = vc*vc;
      rowstencil.c = 1;

      ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
    }
  }

  /*
     Restore vectors and assemble the matrices
  */
  ierr = PetscLogFlops(19.0*xm*ym);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  /* NOTE(review): unlike IJacobianByHand, no values are ever inserted into B
     above, yet B is assembled here when -aijpc is set — confirm whether the
     driver passes A==B on the RHS path, or whether B should also be filled. */
  if (appctx->aijpc) {
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
1008 
1009 PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
1010 {
1011   AppCtx         *appctx = (AppCtx*)ctx;
1012   DM             da;
1013   PetscErrorCode ierr;
1014   PetscScalar    *u_vec;
1015   Vec            localU;
1016 
1017   PetscFunctionBegin;
1018   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
1019   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
1020 
1021   /*
1022      Scatter ghost points to local vector,using the 2-step process
1023         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
1024      By placing code between these two statements, computations can be
1025      done while messages are in transition.
1026   */
1027   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
1028   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
1029 
1030   /* Get pointers to vector data */
1031   ierr = VecGetArray(localU,&u_vec);CHKERRQ(ierr);
1032 
1033   /*
1034     Compute Jacobian
1035   */
1036   ierr = PetscAdolcComputeRHSJacobianLocal(1,A,u_vec,appctx->adctx);CHKERRQ(ierr);
1037 
1038   /*
1039      Restore vectors
1040   */
1041   ierr = VecRestoreArray(localU,&u_vec);CHKERRQ(ierr);
1042   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
1043   PetscFunctionReturn(0);
1044 }
1045 
1046 /*TEST
1047 
1048   build:
1049     requires: double !complex adolc colpack
1050 
1051   test:
1052     suffix: 1
1053     nsize: 1
1054     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian
1055     output_file: output/adr_ex5adj_1.out
1056 
1057   test:
1058     suffix: 2
1059     nsize: 1
1060     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian -implicitform
1061     output_file: output/adr_ex5adj_2.out
1062 
1063   test:
1064     suffix: 3
1065     nsize: 4
1066     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor
1067     output_file: output/adr_ex5adj_3.out
1068 
1069   test:
1070     suffix: 4
1071     nsize: 4
1072     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor -implicitform
1073     output_file: output/adr_ex5adj_4.out
1074 
1075   testset:
1076     suffix: 5
1077     nsize: 4
1078     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse
1079     output_file: output/adr_ex5adj_5.out
1080 
1081   testset:
1082     suffix: 6
1083     nsize: 4
1084     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse -implicitform
1085     output_file: output/adr_ex5adj_6.out
1086 
1087 TEST*/
1088