xref: /petsc/src/ts/tutorials/autodiff/adr_ex5adj.cxx (revision efbe7e8a80d07327753dbe0b33efee01e046af3f)
1 static char help[] = "Demonstrates adjoint sensitivity analysis for Reaction-Diffusion Equations.\n";
2 
3 /*
4    Concepts: TS^time-dependent nonlinear problems
5    Concepts: Automatic differentiation using ADOL-C
6 */
7 /*
8    REQUIRES configuration of PETSc with option --download-adolc.
9 
10    For documentation on ADOL-C, see
11      $PETSC_ARCH/externalpackages/ADOL-C-2.6.0/ADOL-C/doc/adolc-manual.pdf
12 
13    REQUIRES configuration of PETSc with option --download-colpack
14 
15    For documentation on ColPack, see
16      $PETSC_ARCH/externalpackages/git.colpack/README.md
17 */
18 /* ------------------------------------------------------------------------
19   See ../advection-diffusion-reaction/ex5 for a description of the problem
20   ------------------------------------------------------------------------- */
21 
22 /*
23   Runtime options:
24 
25     Solver:
26       -forwardonly       - Run the forward simulation without adjoint.
27       -implicitform      - Provide IFunction and IJacobian to TS, if not set, RHSFunction and RHSJacobian will be used.
28       -aijpc             - Set the preconditioner matrix to be aij (the Jacobian matrix can be of a different type such as ELL).
29 
30     Jacobian generation:
31       -jacobian_by_hand  - Use the hand-coded Jacobian of ex5.c, rather than generating it automatically.
32       -no_annotation     - Do not annotate ADOL-C active variables. (Should be used alongside -jacobian_by_hand.)
33       -adolc_sparse      - Calculate Jacobian in compressed format, using ADOL-C sparse functionality.
34       -adolc_sparse_view - Print sparsity pattern used by -adolc_sparse option.
35  */
36 /*
37   NOTE: If -adolc_sparse option is used, at least four processors should be used, so that no processor owns boundaries which are
38         identified by the periodic boundary conditions. The number of grid points in both x- and y-directions should be multiples
39         of 5, in order for the 5-point stencil to be cleanly parallelised.
40 */
41 
42 #include <petscdmda.h>
43 #include <petscts.h>
44 #include "adolc-utils/drivers.cxx"
45 #include <adolc/adolc.h>
46 
/* (Passive) field for the two variables: the two solution components u and v
   stored at each grid point of the DMDA */
typedef struct {
  PetscScalar u,v;
} Field;

/* Active field for the two variables: same layout as Field, but with ADOL-C
   adoubles so that operations on it are recorded on the tape */
typedef struct {
  adouble u,v;
} AField;

/* Application context */
typedef struct {
  PetscReal D1,D2,gamma,kappa; /* model parameters (set in main) */
  AField    **u_a,**f_a;       /* active counterparts of the state/residual arrays */
  PetscBool aijpc;             /* use an AIJ preconditioner matrix (-aijpc) */
  AdolcCtx  *adctx; /* Automatic differentiation support */
} AppCtx;

/* Forward declarations: initial/adjoint data, residual evaluations in passive
   and ADOL-C "active" variants, and Jacobians both hand-coded and AD-generated */
extern PetscErrorCode InitialConditions(DM da,Vec U);
extern PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y);
extern PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr);
extern PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr);
extern PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
extern PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr);
extern PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
extern PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx);
extern PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
extern PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx);
75 
76 int main(int argc,char **argv)
77 {
78   TS             ts;
79   Vec            x,r,xdot;
80   PetscErrorCode ierr;
81   DM             da;
82   AppCtx         appctx;
83   AdolcCtx       *adctx;
84   Vec            lambda[1];
85   PetscBool      forwardonly=PETSC_FALSE,implicitform=PETSC_FALSE,byhand=PETSC_FALSE;
86   PetscInt       gxm,gym,i,dofs = 2,ctrl[3] = {0,0,0};
87   PetscScalar    **Seed = NULL,**Rec = NULL,*u_vec;
88   unsigned int   **JP = NULL;
89   ISColoring     iscoloring;
90 
91   ierr = PetscInitialize(&argc,&argv,NULL,help);if (ierr) return ierr;
92   ierr = PetscNew(&adctx);CHKERRQ(ierr);
93   ierr = PetscOptionsGetBool(NULL,NULL,"-forwardonly",&forwardonly,NULL);CHKERRQ(ierr);
94   ierr = PetscOptionsGetBool(NULL,NULL,"-implicitform",&implicitform,NULL);CHKERRQ(ierr);
95   appctx.aijpc = PETSC_FALSE,adctx->no_an = PETSC_FALSE,adctx->sparse = PETSC_FALSE,adctx->sparse_view = PETSC_FALSE;adctx->sparse_view_done = PETSC_FALSE;
96   ierr = PetscOptionsGetBool(NULL,NULL,"-aijpc",&appctx.aijpc,NULL);CHKERRQ(ierr);
97   ierr = PetscOptionsGetBool(NULL,NULL,"-no_annotation",&adctx->no_an,NULL);CHKERRQ(ierr);
98   ierr = PetscOptionsGetBool(NULL,NULL,"-jacobian_by_hand",&byhand,NULL);CHKERRQ(ierr);
99   ierr = PetscOptionsGetBool(NULL,NULL,"-adolc_sparse",&adctx->sparse,NULL);CHKERRQ(ierr);
100   ierr = PetscOptionsGetBool(NULL,NULL,"-adolc_sparse_view",&adctx->sparse_view,NULL);CHKERRQ(ierr);
101   appctx.D1    = 8.0e-5;
102   appctx.D2    = 4.0e-5;
103   appctx.gamma = .024;
104   appctx.kappa = .06;
105   appctx.adctx = adctx;
106 
107   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
108      Create distributed array (DMDA) to manage parallel grid and vectors
109   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
110   ierr = DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_PERIODIC,DM_BOUNDARY_PERIODIC,DMDA_STENCIL_STAR,65,65,PETSC_DECIDE,PETSC_DECIDE,2,1,NULL,NULL,&da);CHKERRQ(ierr);
111   ierr = DMSetFromOptions(da);CHKERRQ(ierr);
112   ierr = DMSetUp(da);CHKERRQ(ierr);
113   ierr = DMDASetFieldName(da,0,"u");CHKERRQ(ierr);
114   ierr = DMDASetFieldName(da,1,"v");CHKERRQ(ierr);
115 
116   /*  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
117      Extract global vectors from DMDA; then duplicate for remaining
118      vectors that are the same types
119    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
120   ierr = DMCreateGlobalVector(da,&x);CHKERRQ(ierr);
121   ierr = VecDuplicate(x,&r);CHKERRQ(ierr);
122   ierr = VecDuplicate(x,&xdot);CHKERRQ(ierr);
123 
124   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
125      Create timestepping solver context
126      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
127   ierr = TSCreate(PETSC_COMM_WORLD,&ts);CHKERRQ(ierr);
128   ierr = TSSetType(ts,TSCN);CHKERRQ(ierr);
129   ierr = TSSetDM(ts,da);CHKERRQ(ierr);
130   ierr = TSSetProblemType(ts,TS_NONLINEAR);CHKERRQ(ierr);
131   if (!implicitform) {
132     ierr = TSSetRHSFunction(ts,NULL,RHSFunctionPassive,&appctx);CHKERRQ(ierr);
133   } else {
134     ierr = DMDATSSetIFunctionLocal(da,INSERT_VALUES,(DMDATSIFunctionLocal)IFunctionLocalPassive,&appctx);CHKERRQ(ierr);
135   }
136 
137   if (!adctx->no_an) {
138     ierr = DMDAGetGhostCorners(da,NULL,NULL,NULL,&gxm,&gym,NULL);CHKERRQ(ierr);
139     adctx->m = dofs*gxm*gym;adctx->n = dofs*gxm*gym; /* Number of dependent and independent variables */
140 
141     /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
142        Trace function(s) just once
143      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
144     if (!implicitform) {
145       ierr = RHSFunctionActive(ts,1.0,x,r,&appctx);CHKERRQ(ierr);
146     } else {
147       ierr = IFunctionActive(ts,1.0,x,xdot,r,&appctx);CHKERRQ(ierr);
148     }
149 
150     /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
151       In the case where ADOL-C generates the Jacobian in compressed format,
152       seed and recovery matrices are required. Since the sparsity structure
153       of the Jacobian does not change over the course of the time
154       integration, we can save computational effort by only generating
155       these objects once.
156        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
157     if (adctx->sparse) {
158       /*
159          Generate sparsity pattern
160 
161          One way to do this is to use the Jacobian pattern driver `jac_pat`
162          provided by ColPack. Otherwise, it is the responsibility of the user
163          to obtain the sparsity pattern.
164       */
165       ierr = PetscMalloc1(adctx->n,&u_vec);CHKERRQ(ierr);
166       JP = (unsigned int **) malloc(adctx->m*sizeof(unsigned int*));
167       jac_pat(1,adctx->m,adctx->n,u_vec,JP,ctrl);
168       if (adctx->sparse_view) {
169         ierr = PrintSparsity(MPI_COMM_WORLD,adctx->m,JP);CHKERRQ(ierr);
170       }
171 
172       /*
173         Extract a column colouring
174 
175         For problems using DMDA, colourings can be extracted directly, as
176         follows. There are also ColPack drivers available. Otherwise, it is the
177         responsibility of the user to obtain a suitable colouring.
178       */
179       ierr = DMCreateColoring(da,IS_COLORING_LOCAL,&iscoloring);CHKERRQ(ierr);
180       ierr = ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&adctx->p,NULL);CHKERRQ(ierr);
181 
182       /* Generate seed matrix to propagate through the forward mode of AD */
183       ierr = AdolcMalloc2(adctx->n,adctx->p,&Seed);CHKERRQ(ierr);
184       ierr = GenerateSeedMatrix(iscoloring,Seed);CHKERRQ(ierr);
185       ierr = ISColoringDestroy(&iscoloring);CHKERRQ(ierr);
186 
187       /*
188         Generate recovery matrix, which is used to recover the Jacobian from
189         compressed format */
190       ierr = AdolcMalloc2(adctx->m,adctx->p,&Rec);CHKERRQ(ierr);
191       ierr = GetRecoveryMatrix(Seed,JP,adctx->m,adctx->p,Rec);CHKERRQ(ierr);
192 
193       /* Store results and free workspace */
194       adctx->Rec = Rec;
195       for (i=0;i<adctx->m;i++)
196         free(JP[i]);
197       free(JP);
198       ierr = PetscFree(u_vec);CHKERRQ(ierr);
199 
200     } else {
201 
202       /*
203         In 'full' Jacobian mode, propagate an identity matrix through the
204         forward mode of AD.
205       */
206       adctx->p = adctx->n;
207       ierr = AdolcMalloc2(adctx->n,adctx->p,&Seed);CHKERRQ(ierr);
208       ierr = Identity(adctx->n,Seed);CHKERRQ(ierr);
209     }
210     adctx->Seed = Seed;
211   }
212 
213   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
214      Set Jacobian
215    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
216   if (!implicitform) {
217     if (!byhand) {
218       ierr = TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianAdolc,&appctx);CHKERRQ(ierr);
219     } else {
220       ierr = TSSetRHSJacobian(ts,NULL,NULL,RHSJacobianByHand,&appctx);CHKERRQ(ierr);
221     }
222   } else {
223     if (appctx.aijpc) {
224       Mat A,B;
225 
226       ierr = DMSetMatType(da,MATSELL);CHKERRQ(ierr);
227       ierr = DMCreateMatrix(da,&A);CHKERRQ(ierr);
228       ierr = MatConvert(A,MATAIJ,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
229       /* FIXME do we need to change viewer to display matrix in natural ordering as DMCreateMatrix_DA does? */
230       if (!byhand) {
231         ierr = TSSetIJacobian(ts,A,B,IJacobianAdolc,&appctx);CHKERRQ(ierr);
232       } else {
233         ierr = TSSetIJacobian(ts,A,B,IJacobianByHand,&appctx);CHKERRQ(ierr);
234       }
235       ierr = MatDestroy(&A);CHKERRQ(ierr);
236       ierr = MatDestroy(&B);CHKERRQ(ierr);
237     } else {
238       if (!byhand) {
239         ierr = TSSetIJacobian(ts,NULL,NULL,IJacobianAdolc,&appctx);CHKERRQ(ierr);
240       } else {
241         ierr = TSSetIJacobian(ts,NULL,NULL,IJacobianByHand,&appctx);CHKERRQ(ierr);
242       }
243     }
244   }
245 
246   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
247      Set initial conditions
248    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
249   ierr = InitialConditions(da,x);CHKERRQ(ierr);
250   ierr = TSSetSolution(ts,x);CHKERRQ(ierr);
251 
252   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
253     Have the TS save its trajectory so that TSAdjointSolve() may be used
254     and set solver options
255    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
256   if (!forwardonly) {
257     ierr = TSSetSaveTrajectory(ts);CHKERRQ(ierr);
258     ierr = TSSetMaxTime(ts,200.0);CHKERRQ(ierr);
259     ierr = TSSetTimeStep(ts,0.5);CHKERRQ(ierr);
260   } else {
261     ierr = TSSetMaxTime(ts,2000.0);CHKERRQ(ierr);
262     ierr = TSSetTimeStep(ts,10);CHKERRQ(ierr);
263   }
264   ierr = TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER);CHKERRQ(ierr);
265   ierr = TSSetFromOptions(ts);CHKERRQ(ierr);
266 
267   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
268      Solve ODE system
269      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
270   ierr = TSSolve(ts,x);CHKERRQ(ierr);
271   if (!forwardonly) {
272     /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
273        Start the Adjoint model
274        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
275     ierr = VecDuplicate(x,&lambda[0]);CHKERRQ(ierr);
276     /*   Reset initial conditions for the adjoint integration */
277     ierr = InitializeLambda(da,lambda[0],0.5,0.5);CHKERRQ(ierr);
278     ierr = TSSetCostGradients(ts,1,lambda,NULL);CHKERRQ(ierr);
279     ierr = TSAdjointSolve(ts);CHKERRQ(ierr);
280     ierr = VecDestroy(&lambda[0]);CHKERRQ(ierr);
281   }
282 
283   /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
284      Free work space.
285    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
286   ierr = VecDestroy(&xdot);CHKERRQ(ierr);
287   ierr = VecDestroy(&r);CHKERRQ(ierr);
288   ierr = VecDestroy(&x);CHKERRQ(ierr);
289   ierr = TSDestroy(&ts);CHKERRQ(ierr);
290   if (!adctx->no_an) {
291     if (adctx->sparse)
292       ierr = AdolcFree2(Rec);CHKERRQ(ierr);
293     ierr = AdolcFree2(Seed);CHKERRQ(ierr);
294   }
295   ierr = DMDestroy(&da);CHKERRQ(ierr);
296   ierr = PetscFree(adctx);CHKERRQ(ierr);
297   ierr = PetscFinalize();
298   return ierr;
299 }
300 
301 PetscErrorCode InitialConditions(DM da,Vec U)
302 {
303   PetscErrorCode ierr;
304   PetscInt       i,j,xs,ys,xm,ym,Mx,My;
305   Field          **u;
306   PetscReal      hx,hy,x,y;
307 
308   PetscFunctionBegin;
309   ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
310 
311   hx = 2.5/(PetscReal)Mx;
312   hy = 2.5/(PetscReal)My;
313 
314   /*
315      Get pointers to vector data
316   */
317   ierr = DMDAVecGetArray(da,U,&u);CHKERRQ(ierr);
318 
319   /*
320      Get local grid boundaries
321   */
322   ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
323 
324   /*
325      Compute function over the locally owned part of the grid
326   */
327   for (j=ys; j<ys+ym; j++) {
328     y = j*hy;
329     for (i=xs; i<xs+xm; i++) {
330       x = i*hx;
331       if ((1.0 <= x) && (x <= 1.5) && (1.0 <= y) && (y <= 1.5)) u[j][i].v = .25*PetscPowReal(PetscSinReal(4.0*PETSC_PI*x),2.0)*PetscPowReal(PetscSinReal(4.0*PETSC_PI*y),2.0);
332       else u[j][i].v = 0.0;
333 
334       u[j][i].u = 1.0 - 2.0*u[j][i].v;
335     }
336   }
337 
338   /*
339      Restore vectors
340   */
341   ierr = DMDAVecRestoreArray(da,U,&u);CHKERRQ(ierr);
342   PetscFunctionReturn(0);
343 }
344 
345 PetscErrorCode InitializeLambda(DM da,Vec lambda,PetscReal x,PetscReal y)
346 {
347    PetscInt i,j,Mx,My,xs,ys,xm,ym;
348    PetscErrorCode ierr;
349    Field **l;
350    PetscFunctionBegin;
351 
352    ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
353    /* locate the global i index for x and j index for y */
354    i = (PetscInt)(x*(Mx-1));
355    j = (PetscInt)(y*(My-1));
356    ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
357 
358    if (xs <= i && i < xs+xm && ys <= j && j < ys+ym) {
359      /* the i,j vertex is on this process */
360      ierr = DMDAVecGetArray(da,lambda,&l);CHKERRQ(ierr);
361      l[j][i].u = 1.0;
362      l[j][i].v = 1.0;
363      ierr = DMDAVecRestoreArray(da,lambda,&l);CHKERRQ(ierr);
364    }
365    PetscFunctionReturn(0);
366 }
367 
368 PetscErrorCode IFunctionLocalPassive(DMDALocalInfo *info,PetscReal t,Field**u,Field**udot,Field**f,void *ptr)
369 {
370   AppCtx         *appctx = (AppCtx*)ptr;
371   PetscInt       i,j,xs,ys,xm,ym;
372   PetscReal      hx,hy,sx,sy;
373   PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
374   PetscErrorCode ierr;
375 
376   PetscFunctionBegin;
377   hx = 2.50/(PetscReal)(info->mx); sx = 1.0/(hx*hx);
378   hy = 2.50/(PetscReal)(info->my); sy = 1.0/(hy*hy);
379 
380   /* Get local grid boundaries */
381   xs = info->xs; xm = info->xm; ys = info->ys; ym = info->ym;
382 
383   /* Compute function over the locally owned part of the grid */
384   for (j=ys; j<ys+ym; j++) {
385     for (i=xs; i<xs+xm; i++) {
386       uc        = u[j][i].u;
387       uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
388       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
389       vc        = u[j][i].v;
390       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
391       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
392       f[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
393       f[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
394     }
395   }
396   ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);
397   PetscFunctionReturn(0);
398 }
399 
/*
  Active (taped) implicit residual F(t,U,Udot). The same computation as
  IFunctionLocalPassive, but performed on ADOL-C adoubles between trace_on(1)
  and trace_off() so the operations are recorded on tape 1 for later Jacobian
  assembly. Called once from main() purely to record the tape.
*/
PetscErrorCode IFunctionActive(TS ts,PetscReal ftime,Vec U,Vec Udot,Vec F,void *ptr)
{
  PetscErrorCode ierr;
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  DMDALocalInfo  info;
  Field          **u,**f,**udot;
  Vec            localU;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym;
  PetscReal      hx,hy,sx,sy;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  AField         **f_a,*f_c,**u_a,*u_c;
  PetscScalar    dummy;  /* sink for dependents at ghost points (see below) */

  PetscFunctionBegin;

  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMDAGetLocalInfo(da,&info);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  /* Grid spacings and inverse-square scalings for the 5-point stencil */
  hx = 2.50/(PetscReal)(info.mx); sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(info.my); sy = 1.0/(hy*hy);
  /* Owned (xs..) and ghosted (gxs..) index ranges for this process */
  xs = info.xs; xm = info.xm; gxs = info.gxs; gxm = info.gxm;
  ys = info.ys; ym = info.ym; gys = info.gys; gym = info.gym;

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(da,F,&f);CHKERRQ(ierr);
  ierr = DMDAVecGetArrayRead(da,Udot,&udot);CHKERRQ(ierr);

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[info.gxm*info.gym];
  f_c = new AField[info.gxm*info.gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[info.gym];
  f_a = new AField*[info.gym];

  /* Align indices between array types to endow 2d array with ghost points */
  ierr = GiveGhostPoints(da,u_c,&u_a);CHKERRQ(ierr);
  ierr = GiveGhostPoints(da,f_c,&f_a);CHKERRQ(ierr);

  trace_on(1);  /* Start of active section on tape 1 */

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /* Compute function over the locally owned part of the grid */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc        = u_a[j][i].u;
      uxx       = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy       = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc        = u_a[j][i].v;
      vxx       = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy       = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = udot[j][i].u - appctx->D1*(uxx + uyy) + uc*vc*vc - appctx->gamma*(1.0 - uc);
      f_a[j][i].v = udot[j][i].v - appctx->D2*(vxx + vyy) - uc*vc*vc + (appctx->gamma + appctx->kappa)*vc;
    }
  }

  /*
    Mark dependence

    NOTE: Marking dependence of dummy variables makes the index notation much simpler when forming
          the Jacobian later.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      if ((i < xs) || (i >= xs+xm) || (j < ys) || (j >= ys+ym)) {
        /* Ghost-point dependents go to the throwaway `dummy` scalar */
        f_a[j][i].u >>= dummy;
        f_a[j][i].v >>= dummy;
      } else {
        f_a[j][i].u >>= f[j][i].u;
        f_a[j][i].v >>= f[j][i].v;
      }
    }
  }
  trace_off();  /* End of active section */
  ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);

  /* Restore vectors */
  ierr = DMDAVecRestoreArray(da,F,&f);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,Udot,&udot);CHKERRQ(ierr);

  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);

  /* Destroy AFields appropriately */
  /* GiveGhostPoints offset the row-pointer arrays by -gys so they can be
     indexed with global (ghosted) indices; add the offset back so delete[]
     receives the pointers originally returned by `new`. */
  f_a += info.gys;
  u_a += info.gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
524 
/*
  Passive (non-taped) right-hand side F = RHS(U) for the explicit form,
  evaluated with a 5-point Laplacian stencil over the locally owned grid.
  Results are written into a local (ghosted) vector and gathered into the
  global F with ADD_VALUES, which is why F is zeroed first (NOTE (1)/(2)).
*/
PetscErrorCode RHSFunctionPassive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
{
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  PetscErrorCode ierr;
  PetscInt       i,j,xs,ys,xm,ym,Mx,My;
  PetscReal      hx,hy,sx,sy;
  PetscScalar    uc,uxx,uyy,vc,vxx,vyy;
  Field          **u,**f;
  Vec            localU,localF;

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
  /* Grid spacings and inverse-square scalings for the stencil */
  hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localF);CHKERRQ(ierr);

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = VecZeroEntries(F);CHKERRQ(ierr); // NOTE (1): See (2) below
  ierr = DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(da,localF,&f);CHKERRQ(ierr);

  /*
     Get local grid boundaries
  */
  ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);

  /*
     Compute function over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc        = u[j][i].u;
      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
      uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
      vc        = u[j][i].v;
      vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
      vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
      f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
      f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
    }
  }

  /*
     Gather global vector, using the 2-step process
        DMLocalToGlobalBegin(),DMLocalToGlobalEnd().

     NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
               DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
  */
  ierr = DMLocalToGlobalBegin(da,localF,ADD_VALUES,F);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,localF,ADD_VALUES,F);CHKERRQ(ierr);

  /*
     Restore vectors
  */
  ierr = DMDAVecRestoreArray(da,localF,&f);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localF);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
603 
/*
  Active (taped) right-hand side F = RHS(U). The same computation as
  RHSFunctionPassive, but performed on ADOL-C adoubles between trace_on(1)
  and trace_off() so the operations are recorded on tape 1 for later Jacobian
  assembly. Called once from main() purely to record the tape.
*/
PetscErrorCode RHSFunctionActive(TS ts,PetscReal ftime,Vec U,Vec F,void *ptr)
{
  AppCtx         *appctx = (AppCtx*)ptr;
  DM             da;
  PetscErrorCode ierr;
  PetscInt       i,j,xs,ys,xm,ym,gxs,gys,gxm,gym,Mx,My;
  PetscReal      hx,hy,sx,sy;
  AField         **f_a,*f_c,**u_a,*u_c;
  adouble        uc,uxx,uyy,vc,vxx,vyy;
  Field          **u,**f;
  Vec            localU,localF;

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
  /* Grid spacings and inverse-square scalings for the 5-point stencil */
  hx = 2.50/(PetscReal)(Mx);sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My);sy = 1.0/(hy*hy);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localF);CHKERRQ(ierr);

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = VecZeroEntries(F);CHKERRQ(ierr); // NOTE (1): See (2) below
  ierr = DMGlobalToLocalBegin(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,F,INSERT_VALUES,localF);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(da,localF,&f);CHKERRQ(ierr);

  /*
     Get local and ghosted grid boundaries
  */
  ierr = DMDAGetGhostCorners(da,&gxs,&gys,NULL,&gxm,&gym,NULL);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);

  /*
    Create contiguous 1-arrays of AFields

    NOTE: Memory for ADOL-C active variables (such as adouble and AField)
          cannot be allocated using PetscMalloc, as this does not call the
          relevant class constructor. Instead, we use the C++ keyword `new`.
  */
  u_c = new AField[gxm*gym];
  f_c = new AField[gxm*gym];

  /* Create corresponding 2-arrays of AFields */
  u_a = new AField*[gym];
  f_a = new AField*[gym];

  /* Align indices between array types to endow 2d array with ghost points */
  ierr = GiveGhostPoints(da,u_c,&u_a);CHKERRQ(ierr);
  ierr = GiveGhostPoints(da,f_c,&f_a);CHKERRQ(ierr);

  /*
     Compute function over the locally owned part of the grid
  */
  trace_on(1);  // ----------------------------------------------- Start of active section

  /*
    Mark independence

    NOTE: Ghost points are marked as independent, in place of the points they represent on
          other processors / on other boundaries.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      u_a[j][i].u <<= u[j][i].u;
      u_a[j][i].v <<= u[j][i].v;
    }
  }

  /*
    Compute function over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      uc          = u_a[j][i].u;
      uxx         = (-2.0*uc + u_a[j][i-1].u + u_a[j][i+1].u)*sx;
      uyy         = (-2.0*uc + u_a[j-1][i].u + u_a[j+1][i].u)*sy;
      vc          = u_a[j][i].v;
      vxx         = (-2.0*vc + u_a[j][i-1].v + u_a[j][i+1].v)*sx;
      vyy         = (-2.0*vc + u_a[j-1][i].v + u_a[j+1][i].v)*sy;
      f_a[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);
      f_a[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc;
    }
  }
  /*
    Mark dependence

    NOTE: Ghost points are marked as dependent in order to vastly simplify index notation
          during Jacobian assembly.
  */
  for (j=gys; j<gys+gym; j++) {
    for (i=gxs; i<gxs+gxm; i++) {
      f_a[j][i].u >>= f[j][i].u;
      f_a[j][i].v >>= f[j][i].v;
    }
  }
  trace_off();  // ----------------------------------------------- End of active section

  /* Test zeroth order scalar evaluation in ADOL-C gives the same result */
//  if (appctx->adctx->zos) {
//    ierr = TestZOS2d(da,f,u,appctx);CHKERRQ(ierr); // FIXME: Why does this give nonzero?
//  }

  /*
     Gather global vector, using the 2-step process
        DMLocalToGlobalBegin(),DMLocalToGlobalEnd().

     NOTE (2): We need to use ADD_VALUES if boundaries are not of type DM_BOUNDARY_NONE or
               DM_BOUNDARY_GHOSTED, meaning we should also zero F before addition (see (1) above).
  */
  ierr = DMLocalToGlobalBegin(da,localF,ADD_VALUES,F);CHKERRQ(ierr);
  ierr = DMLocalToGlobalEnd(da,localF,ADD_VALUES,F);CHKERRQ(ierr);

  /*
     Restore vectors
  */
  ierr = DMDAVecRestoreArray(da,localF,&f);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localF);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = PetscLogFlops(16.0*xm*ym);CHKERRQ(ierr);

  /* Destroy AFields appropriately */
  /* GiveGhostPoints offset the row-pointer arrays by -gys so they can be
     indexed with global (ghosted) indices; add the offset back so delete[]
     receives the pointers originally returned by `new`. */
  f_a += gys;
  u_a += gys;
  delete[] f_a;
  delete[] u_a;
  delete[] f_c;
  delete[] u_c;

  PetscFunctionReturn(0);
}
747 
748 PetscErrorCode IJacobianAdolc(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
749 {
750   AppCtx            *appctx = (AppCtx*)ctx;
751   DM                da;
752   PetscErrorCode    ierr;
753   const PetscScalar *u_vec;
754   Vec               localU;
755 
756   PetscFunctionBegin;
757   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
758   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
759 
760   /*
761      Scatter ghost points to local vector,using the 2-step process
762         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
763      By placing code between these two statements, computations can be
764      done while messages are in transition.
765   */
766   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
767   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
768 
769   /* Get pointers to vector data */
770   ierr = VecGetArrayRead(localU,&u_vec);CHKERRQ(ierr);
771 
772   /*
773     Compute Jacobian
774   */
775   ierr = PetscAdolcComputeIJacobianLocalIDMass(1,A,u_vec,a,appctx->adctx);CHKERRQ(ierr);
776 
777   /*
778      Restore vectors
779   */
780   ierr = VecRestoreArrayRead(localU,&u_vec);CHKERRQ(ierr);
781   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
782   PetscFunctionReturn(0);
783 }
784 
785 PetscErrorCode IJacobianByHand(TS ts,PetscReal t,Vec U,Vec Udot,PetscReal a,Mat A,Mat B,void *ctx)
786 {
787   AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
788   DM             da;
789   PetscErrorCode ierr;
790   PetscInt       i,j,Mx,My,xs,ys,xm,ym;
791   PetscReal      hx,hy,sx,sy;
792   PetscScalar    uc,vc;
793   Field          **u;
794   Vec            localU;
795   MatStencil     stencil[6],rowstencil;
796   PetscScalar    entries[6];
797 
798   PetscFunctionBegin;
799   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
800   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
801   ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);
802 
803   hx = 2.50/(PetscReal)Mx; sx = 1.0/(hx*hx);
804   hy = 2.50/(PetscReal)My; sy = 1.0/(hy*hy);
805 
806   /*
807      Scatter ghost points to local vector,using the 2-step process
808         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
809      By placing code between these two statements, computations can be
810      done while messages are in transition.
811   */
812   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
813   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
814 
815   /*
816      Get pointers to vector data
817   */
818   ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);
819 
820   /*
821      Get local grid boundaries
822   */
823   ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
824 
825   stencil[0].k = 0;
826   stencil[1].k = 0;
827   stencil[2].k = 0;
828   stencil[3].k = 0;
829   stencil[4].k = 0;
830   stencil[5].k = 0;
831   rowstencil.k = 0;
832   /*
833      Compute function over the locally owned part of the grid
834   */
835   for (j=ys; j<ys+ym; j++) {
836 
837     stencil[0].j = j-1;
838     stencil[1].j = j+1;
839     stencil[2].j = j;
840     stencil[3].j = j;
841     stencil[4].j = j;
842     stencil[5].j = j;
843     rowstencil.k = 0; rowstencil.j = j;
844     for (i=xs; i<xs+xm; i++) {
845       uc = u[j][i].u;
846       vc = u[j][i].v;
847 
848       /*      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
849       uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;
850 
851       vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
852       vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
853        f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);*/
854 
855       stencil[0].i = i; stencil[0].c = 0; entries[0] = -appctx->D1*sy;
856       stencil[1].i = i; stencil[1].c = 0; entries[1] = -appctx->D1*sy;
857       stencil[2].i = i-1; stencil[2].c = 0; entries[2] = -appctx->D1*sx;
858       stencil[3].i = i+1; stencil[3].c = 0; entries[3] = -appctx->D1*sx;
859       stencil[4].i = i; stencil[4].c = 0; entries[4] = 2.0*appctx->D1*(sx + sy) + vc*vc + appctx->gamma + a;
860       stencil[5].i = i; stencil[5].c = 1; entries[5] = 2.0*uc*vc;
861       rowstencil.i = i; rowstencil.c = 0;
862 
863       ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
864       if (appctx->aijpc) {
865         ierr = MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
866       }
867       stencil[0].c = 1; entries[0] = -appctx->D2*sy;
868       stencil[1].c = 1; entries[1] = -appctx->D2*sy;
869       stencil[2].c = 1; entries[2] = -appctx->D2*sx;
870       stencil[3].c = 1; entries[3] = -appctx->D2*sx;
871       stencil[4].c = 1; entries[4] = 2.0*appctx->D2*(sx + sy) - 2.0*uc*vc + appctx->gamma + appctx->kappa + a;
872       stencil[5].c = 0; entries[5] = -vc*vc;
873 
874       ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
875       if (appctx->aijpc) {
876         ierr = MatSetValuesStencil(B,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
877       }
878       /* f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
879     }
880   }
881 
882   /*
883      Restore vectors
884   */
885   ierr = PetscLogFlops(19.0*xm*ym);CHKERRQ(ierr);
886   ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
887   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
888   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
889   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
890   ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
891   if (appctx->aijpc) {
892     ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
893     ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
894     ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
895   }
896   PetscFunctionReturn(0);
897 }
898 
/*
  Hand-coded Jacobian dRHS/dU of the explicit (RHS) form of the
  reaction-diffusion system. Each grid point contributes two rows (components
  u and v); each row has a 6-point stencil: the 5-point diffusion stencil for
  its own component plus the diagonal coupling to the other component coming
  from the reaction terms (see the commented-out RHS expressions below).

  NOTE(review): when appctx->aijpc is set, B is assembled at the end of this
  routine but no values are ever inserted into it here (unlike
  IJacobianByHand, which mirrors the inserts into B) -- presumably A == B in
  the configurations exercised; verify against the callers.
*/
PetscErrorCode RHSJacobianByHand(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
{
  AppCtx         *appctx = (AppCtx*)ctx;     /* user-defined application context */
  DM             da;
  PetscErrorCode ierr;
  PetscInt       i,j,Mx,My,xs,ys,xm,ym;
  PetscReal      hx,hy,sx,sy;
  PetscScalar    uc,vc;
  Field          **u;
  Vec            localU;
  MatStencil     stencil[6],rowstencil;
  PetscScalar    entries[6];

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
  ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = DMDAGetInfo(da,PETSC_IGNORE,&Mx,&My,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE,PETSC_IGNORE);CHKERRQ(ierr);

  /* Mesh spacings and inverse-square factors for the 2.5 x 2.5 domain */
  hx = 2.50/(PetscReal)(Mx); sx = 1.0/(hx*hx);
  hy = 2.50/(PetscReal)(My); sy = 1.0/(hy*hy);

  /*
     Scatter ghost points to local vector,using the 2-step process
        DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
  ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);

  /*
     Get pointers to vector data
  */
  ierr = DMDAVecGetArrayRead(da,localU,&u);CHKERRQ(ierr);

  /*
     Get local grid boundaries
  */
  ierr = DMDAGetCorners(da,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);

  /* 2D problem: the k (third dimension) index is always zero */
  stencil[0].k = 0;
  stencil[1].k = 0;
  stencil[2].k = 0;
  stencil[3].k = 0;
  stencil[4].k = 0;
  stencil[5].k = 0;
  rowstencil.k = 0;

  /*
     Compute function over the locally owned part of the grid
  */
  for (j=ys; j<ys+ym; j++) {

    /* stencil[0..1]: south/north neighbours; [2..3]: west/east; [4..5]: centre */
    stencil[0].j = j-1;
    stencil[1].j = j+1;
    stencil[2].j = j;
    stencil[3].j = j;
    stencil[4].j = j;
    stencil[5].j = j;
    rowstencil.k = 0; rowstencil.j = j;
    for (i=xs; i<xs+xm; i++) {
      uc = u[j][i].u;
      vc = u[j][i].v;

      /*      uxx       = (-2.0*uc + u[j][i-1].u + u[j][i+1].u)*sx;
      uyy       = (-2.0*uc + u[j-1][i].u + u[j+1][i].u)*sy;

      vxx       = (-2.0*vc + u[j][i-1].v + u[j][i+1].v)*sx;
      vyy       = (-2.0*vc + u[j-1][i].v + u[j+1][i].v)*sy;
       f[j][i].u = appctx->D1*(uxx + uyy) - uc*vc*vc + appctx->gamma*(1.0 - uc);*/

      /* Row for the u-equation (component c = 0) */
      stencil[0].i = i; stencil[0].c = 0; entries[0] = appctx->D1*sy;
      stencil[1].i = i; stencil[1].c = 0; entries[1] = appctx->D1*sy;
      stencil[2].i = i-1; stencil[2].c = 0; entries[2] = appctx->D1*sx;
      stencil[3].i = i+1; stencil[3].c = 0; entries[3] = appctx->D1*sx;
      stencil[4].i = i; stencil[4].c = 0; entries[4] = -2.0*appctx->D1*(sx + sy) - vc*vc - appctx->gamma;
      stencil[5].i = i; stencil[5].c = 1; entries[5] = -2.0*uc*vc;
      rowstencil.i = i; rowstencil.c = 0;

      ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);

      /* Row for the v-equation (component c = 1); only the c fields change */
      stencil[0].c = 1; entries[0] = appctx->D2*sy;
      stencil[1].c = 1; entries[1] = appctx->D2*sy;
      stencil[2].c = 1; entries[2] = appctx->D2*sx;
      stencil[3].c = 1; entries[3] = appctx->D2*sx;
      stencil[4].c = 1; entries[4] = -2.0*appctx->D2*(sx + sy) + 2.0*uc*vc - appctx->gamma - appctx->kappa;
      stencil[5].c = 0; entries[5] = vc*vc;
      rowstencil.c = 1;

      ierr = MatSetValuesStencil(A,1,&rowstencil,6,stencil,entries,INSERT_VALUES);CHKERRQ(ierr);
      /* f[j][i].v = appctx->D2*(vxx + vyy) + uc*vc*vc - (appctx->gamma + appctx->kappa)*vc; */
    }
  }

  /*
     Restore vectors
  */
  ierr = PetscLogFlops(19.0*xm*ym);CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(da,localU,&u);CHKERRQ(ierr);
  ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  if (appctx->aijpc) {
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
1009 
1010 PetscErrorCode RHSJacobianAdolc(TS ts,PetscReal t,Vec U,Mat A,Mat B,void *ctx)
1011 {
1012   AppCtx         *appctx = (AppCtx*)ctx;
1013   DM             da;
1014   PetscErrorCode ierr;
1015   PetscScalar    *u_vec;
1016   Vec            localU;
1017 
1018   PetscFunctionBegin;
1019   ierr = TSGetDM(ts,&da);CHKERRQ(ierr);
1020   ierr = DMGetLocalVector(da,&localU);CHKERRQ(ierr);
1021 
1022   /*
1023      Scatter ghost points to local vector,using the 2-step process
1024         DMGlobalToLocalBegin(),DMGlobalToLocalEnd().
1025      By placing code between these two statements, computations can be
1026      done while messages are in transition.
1027   */
1028   ierr = DMGlobalToLocalBegin(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
1029   ierr = DMGlobalToLocalEnd(da,U,INSERT_VALUES,localU);CHKERRQ(ierr);
1030 
1031   /* Get pointers to vector data */
1032   ierr = VecGetArray(localU,&u_vec);CHKERRQ(ierr);
1033 
1034   /*
1035     Compute Jacobian
1036   */
1037   ierr = PetscAdolcComputeRHSJacobianLocal(1,A,u_vec,appctx->adctx);CHKERRQ(ierr);
1038 
1039   /*
1040      Restore vectors
1041   */
1042   ierr = VecRestoreArray(localU,&u_vec);CHKERRQ(ierr);
1043   ierr = DMRestoreLocalVector(da,&localU);CHKERRQ(ierr);
1044   PetscFunctionReturn(0);
1045 }
1046 
1047 /*TEST
1048 
1049   build:
1050     requires: double !complex adolc colpack
1051 
1052   test:
1053     suffix: 1
1054     nsize: 1
1055     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian
1056     output_file: output/adr_ex5adj_1.out
1057 
1058   test:
1059     suffix: 2
1060     nsize: 1
1061     args: -ts_max_steps 1 -da_grid_x 12 -da_grid_y 12 -snes_test_jacobian -implicitform
1062     output_file: output/adr_ex5adj_2.out
1063 
1064   test:
1065     suffix: 3
1066     nsize: 4
1067     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor
1068     output_file: output/adr_ex5adj_3.out
1069 
1070   test:
1071     suffix: 4
1072     nsize: 4
1073     args: -ts_max_steps 10 -da_grid_x 12 -da_grid_y 12 -ts_monitor -ts_adjoint_monitor -implicitform
1074     output_file: output/adr_ex5adj_4.out
1075 
1076   testset:
1077     suffix: 5
1078     nsize: 4
1079     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse
1080     output_file: output/adr_ex5adj_5.out
1081 
1082   testset:
1083     suffix: 6
1084     nsize: 4
1085     args: -ts_max_steps 10 -da_grid_x 15 -da_grid_y 15 -ts_monitor -ts_adjoint_monitor -adolc_sparse -implicitform
1086     output_file: output/adr_ex5adj_6.out
1087 
1088 TEST*/
1089