xref: /petsc/src/ts/utils/dmplexlandau/plexland.c (revision c3b5f7ba6bc5ce25a01a67bb37ba5d34b02bbbd7)
1 #include <../src/mat/impls/aij/seq/aij.h>
2 #include <petsc/private/dmpleximpl.h>   /*I "petscdmplex.h" I*/
3 #include <petsclandau.h>                /*I "petsclandau.h"   I*/
4 #include <petscts.h>
5 #include <petscdmforest.h>
6 #include <petscdmcomposite.h>
7 
8 /* Landau collision operator */
9 
10 /* relativistic terms */
11 #if defined(PETSC_USE_REAL_SINGLE)
12 #define SPEED_OF_LIGHT 2.99792458e8F
13 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
14 #else
15 #define SPEED_OF_LIGHT 2.99792458e8
16 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
17 #endif
18 
19 #define PETSC_THREAD_SYNC
20 #include "land_tensors.h"
21 
22 #if defined(PETSC_HAVE_OPENMP)
23 #include <omp.h>
24 #endif
25 
26 /* vector padding not supported */
27 #define LANDAU_VL  1
28 
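/*
  LandauMatMult - MatMult shell used when a "LandauCtx" container is attached to the matrix:
  the input is scattered with ctx->plex_batch into the ordering used for assembly (see
  jacobian_field_major_order below), multiplied with the cached SeqAIJ MatMult, and the
  result scattered back. Without the container this falls through to a plain MatMult().
*/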
29 static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y)
30 {
31   LandauCtx       *ctx;
32   PetscContainer  container;
33 
34   PetscFunctionBegin;
35   PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
36   if (container) {
37     PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
38     PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
39     PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
40     PetscCall((*ctx->seqaij_mult)(A,ctx->work_vec,y));
41     PetscCall(VecCopy(y, ctx->work_vec));
42     PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
43     PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
44     PetscFunctionReturn(0);
45   }
46   PetscCall(MatMult(A,x,y));
47   PetscFunctionReturn(0);
48 }
49 
50 // Computes v3 = v2 + A * v1 (not supported: this wrapper currently errors out).
51 static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3)
52 {
53   PetscFunctionBegin;
54   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "?????");
55   PetscCall(LandauMatMult(A,v1,v3));
56   PetscCall(VecAYPX(v3,1,v2));
57   PetscFunctionReturn(0);
58 }
59 
60 static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y)
61 {
62   LandauCtx       *ctx;
63   PetscContainer  container;
64 
65   PetscFunctionBegin;
66   PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
67   if (container) {
68     PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
69     PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
70     PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
71     PetscCall((*ctx->seqaij_multtranspose)(A,ctx->work_vec,y));
72     PetscCall(VecCopy(y, ctx->work_vec));
73     PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
74     PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE));
75     PetscFunctionReturn(0);
76   }
77   PetscCall(MatMultTranspose(A,x,y));
78   PetscFunctionReturn(0);
79 }
80 
81 static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x)
82 {
83   LandauCtx       *ctx;
84   PetscContainer  container;
85 
86   PetscFunctionBegin;
87   PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container));
88   if (container) {
89     PetscCall(PetscContainerGetPointer(container, (void **) &ctx));
90     PetscCall((*ctx->seqaij_getdiagonal)(A,ctx->work_vec));
91     PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE));
92     PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE));
93     PetscFunctionReturn(0);
94   }
95   PetscCall(MatGetDiagonal(A, x));
96   PetscFunctionReturn(0);
97 }
98 
99 static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
100 {
101   P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
102   PetscFunctionBegin;
103   // free device data
104   if (maps[0].deviceType != LANDAU_CPU) {
105 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
106     if (maps[0].deviceType == LANDAU_KOKKOS) {
107       PetscCall(LandauKokkosDestroyMatMaps(maps,  maps[0].numgrids)); // implies Kokkos does the free
108     } // else could be CUDA
109 #elif defined(PETSC_HAVE_CUDA)
110     if (maps[0].deviceType == LANDAU_CUDA) {
111       PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids));
112     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %" PetscInt_FMT " ?????",maps->deviceType);
113 #endif
114   }
115   // free host data
116   for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
117     PetscCall(PetscFree(maps[grid].c_maps));
118     PetscCall(PetscFree(maps[grid].gIdx));
119   }
120   PetscCall(PetscFree(maps));
121 
122   PetscFunctionReturn(0);
123 }
124 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
125 {
126   PetscReal     v2 = 0;
127   PetscFunctionBegin;
128   /* compute v^2 / 2 */
129   for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
130   /* the energy is v^2/2 */
131   u[0] = v2/2;
132   PetscFunctionReturn(0);
133 }
134 
135 /* needs double */
136 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
137 {
138   PetscReal     *c2_0_arr = ((PetscReal*)actx);
139   double        u2 = 0, c02 = (double)*c2_0_arr, xx;
140 
141   PetscFunctionBegin;
142   /* compute u^2 / 2 */
143   for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
144   /* gamma - 1 = g_eps, for conditioning and we only take derivatives */
145   xx = u2/c02;
146 #if defined(PETSC_USE_DEBUG)
147   u[0] = PetscSqrtReal(1. + xx);
148 #else
149   u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative
150 #endif
151   PetscFunctionReturn(0);
152 }
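/*
  Note on the conditioning trick above (a sketch): with xx = u^2/c_0^2,
      gamma - 1 = sqrt(1 + xx) - 1 = xx / (sqrt(1 + xx) + 1),
  and the second form avoids cancellation when xx << 1. Since only derivatives of u[0]
  are used, the constant offsets that differ between the two branches are harmless.
*/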
153 
154 /*
155  LandauFormJacobian_Internal - Evaluates the Jacobian (or mass) matrix.
156 
157  Input Parameters:
158  .  a_X - input state vector
159  .  a_ctx - optional user-defined context (LandauCtx)
160  .  dim - dimension
161 
162  Output Parameter:
163  .  JacP - Jacobian matrix, filled but not created
164  */
165 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
166 {
167   LandauCtx         *ctx = (LandauCtx*)a_ctx;
168   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nb;
169   PetscQuadrature   quad;
170   PetscReal         Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
171   PetscScalar       *cellClosure=NULL;
172   const PetscScalar *xdata=NULL;
173   PetscDS           prob;
174   PetscContainer    container;
175   P4estVertexMaps   *maps;
176   Mat               subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];
177 
178   PetscFunctionBegin;
179   PetscValidHeaderSpecific(a_X,VEC_CLASSID,1);
180   PetscValidHeaderSpecific(JacP,MAT_CLASSID,2);
181   PetscValidPointer(ctx,5);
182   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
183   PetscCheckFalse(ctx->plex[0] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
184   PetscCall(PetscLogEventBegin(ctx->events[10],0,0,0,0));
185   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
186   PetscCall(PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container));
187   if (container) {
188     PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly");
189     PetscCall(PetscContainerGetPointer(container, (void **) &maps));
190     PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
191     for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
192   } else {
193     PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly");
194     for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
195       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
196         PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]));
197       }
198     }
199     maps = NULL;
200   }
201   // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
202   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
203   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); Nb = Nq;
204   PetscCheckFalse(Nq >LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
205   // get metadata for collecting dynamic data
206   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
207     PetscInt cStart, cEnd;
208     PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
209     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
210     numCells[grid] = cEnd - cStart; // grids can have different topology
211   }
212   PetscCall(PetscLogEventEnd(ctx->events[10],0,0,0,0));
213   if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
214     DM pack;
215     PetscCall(VecGetDM(a_X, &pack));
216     PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
217     PetscCall(PetscLogEventBegin(ctx->events[1],0,0,0,0));
218     PetscCall(MatZeroEntries(JacP));
219     for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
220       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
221       if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
222     }
223     if (!ctx->gpu_assembly) {
224       Vec          *locXArray,*globXArray;
225       PetscScalar  *cellClosure_it;
226       PetscInt     cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
227       PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
228       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
229         PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
230         PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
231         PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
232       }
233       /* count cellClosure size */
234       PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
235       for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
236       PetscCall(PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure));
237       cellClosure_it = cellClosure;
238       PetscCall(PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray));
239       PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
240       PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
241       PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
242       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
243         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
244           Vec         locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
245           PetscInt    cStart, cEnd, ei;
246           PetscCall(VecDuplicate(locX,&locX2));
247           PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
248           PetscCall(DMGlobalToLocalEnd  (ctx->plex[grid], globX, INSERT_VALUES, locX2));
249           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
250           for (ei = cStart ; ei < cEnd; ++ei) {
251             PetscScalar *coef = NULL;
252             PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
253             PetscCall(PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
254             PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
255             cellClosure_it += Nb*Nf[grid];
256           }
257           PetscCall(VecDestroy(&locX2));
258         }
259       }
260       PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscInt_FMT " != cellClosure_sz = %" PetscInt_FMT,cellClosure_it-cellClosure,cellClosure_sz*ctx->batch_sz);
261       PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
262       PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
263       PetscCall(PetscFree(locXArray));
264       PetscCall(PetscFree(globXArray));
265       xdata = NULL;
266     } else {
267       PetscMemType mtype;
268       if (ctx->jacobian_field_major_order) { // get data in batch ordering
269         PetscCall(VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
270         PetscCall(VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
271         PetscCall(VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype));
272       } else {
273         PetscCall(VecGetArrayReadAndMemType(a_X,&xdata,&mtype));
274       }
275       if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
276         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
277       }
278       cellClosure = NULL;
279     }
280     PetscCall(PetscLogEventEnd(ctx->events[1],0,0,0,0));
281   } else xdata = cellClosure = NULL;
282 
283   /* do it */
284   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
285     if (ctx->deviceType == LANDAU_CUDA) {
286 #if defined(PETSC_HAVE_CUDA)
287       PetscCall(LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP));
288 #else
289       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
290 #endif
291     } else if (ctx->deviceType == LANDAU_KOKKOS) {
292 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
293       PetscCall(LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP));
294 #else
295       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
296 #endif
297     }
298   } else {   /* CPU version */
299     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
300     PetscInt        ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
301     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
302     PetscReal       Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
303     PetscSection    section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
304     PetscScalar     *coo_vals=NULL;
305     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
306       PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
307       PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
308       PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
309     }
310     /* count IPf size, etc */
311     PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
312     const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
313     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
314     for (PetscInt grid=0 ; grid<num_grids ; grid++) {
315       PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
316       elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
317       ip_offset[grid+1]   = ip_offset[grid]   + numCells[grid]*Nq;
318       ipf_offset[grid+1]  = ipf_offset[grid]  + Nq*nfloc*numCells[grid];
319     }
320     IPf_sz_glb = ipf_offset[num_grids];
321     IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
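    /*
      Index layout for ff/dudx/dudy/dudz used below (this matches the index arithmetic in
      the element loops): for batch b_id, grid g, local field f, local element e and
      quadrature point q,
        idx = b_id*IPf_sz_glb + ipf_offset[g] + f*(numCells[g]*Nq) + e*Nq + q,
      so IPf_sz_glb = ipf_offset[num_grids] is the stride of one batch member.
    */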
322     // prep COO
323     if (ctx->coo_assembly) {
324       PetscCall(PetscMalloc1(ctx->SData_d.coo_size,&coo_vals)); // allocate every time?
325       PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",ctx->SData_d.coo_size));
326     }
327     if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
328 #if defined(PETSC_HAVE_THREADSAFETY)
329       double         starttime, endtime;
330       starttime = MPI_Wtime();
331 #endif
332       PetscCall(PetscLogEventBegin(ctx->events[8],0,0,0,0));
333       for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
334         invMass[fieldA]  = ctx->m_0/ctx->masses[fieldA];
335         Eq_m[fieldA]     = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
336         if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
337         nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
338         nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
339       }
340       PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz));
341       // F df/dx
342       for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
343         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
344         // find my grid:
345         PetscInt       grid = 0;
346         while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid
347         {
348           const PetscInt     loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
349           const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
350           PetscScalar        *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
351           PetscReal          *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data on batch 0
352           PetscInt           b,f,q;
353           if (cellClosure) {
354             coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
355           } else {
356             coef = coef_buff;
357             for (f = 0; f < loc_Nf; ++f) {
358               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
359               for (b = 0; b < Nb; ++b) {
360                 PetscInt idx = Idxs[b];
361                 if (idx >= 0) {
362                   coef[f*Nb+b] = xdata[idx+moffset];
363                 } else {
364                   idx = -idx - 1;
365                   coef[f*Nb+b] = 0;
366                   for (q = 0; q < maps[grid].num_face; q++) {
367                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
368                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
369                     coef[f*Nb+b] += scale*xdata[id+moffset];
370                   }
371                 }
372               }
373             }
374           }
375           /* get f and df */
376           for (PetscInt qi = 0; qi < Nq; qi++) {
377             const PetscReal  *invJ = &invJe[qi*dim*dim];
378             const PetscReal  *Bq   = &BB[qi*Nb];
379             const PetscReal  *Dq   = &DD[qi*Nb*dim];
380             PetscReal        u_x[LANDAU_DIM];
381             /* get f & df */
382             for (f = 0; f < loc_Nf; ++f) {
383               const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
384               PetscInt       b, e;
385               PetscReal      refSpaceDer[LANDAU_DIM];
386               ff[idx] = 0.0;
387               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
388               for (b = 0; b < Nb; ++b) {
389                 const PetscInt    cidx = b;
390                 ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
391                 for (int d = 0; d < dim; ++d) {
392                   refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
393                 }
394               }
395               for (int d = 0; d < LANDAU_DIM; ++d) {
396                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
397                   u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
398                 }
399               }
400               dudx[idx] = u_x[0];
401               dudy[idx] = u_x[1];
402 #if LANDAU_DIM==3
403               dudz[idx] = u_x[2];
404 #endif
405             }
406           } // q
407         } // grid
408       } // grid*batch
409       PetscCall(PetscLogEventEnd(ctx->events[8],0,0,0,0));
410 #if defined(PETSC_HAVE_THREADSAFETY)
411       endtime = MPI_Wtime();
412       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
413 #endif
414     } // Jacobian setup
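    /*
      Sketch of the element kernel below (shift==0, Jacobian case): at each test quadrature
      point j the inner (beta) reduction over all grids/species accumulates
        gg2 ~  nu_alpha * sum_beta nu_beta/m_beta * sum_ip w_ip U(v_j,v_ip) . grad f_beta   (friction)
        gg3 ~ -nu_alpha/m_alpha * sum_beta nu_beta * sum_ip w_ip U(v_j,v_ip)   f_beta       (diffusion)
      (U is the Landau tensor; Uk/Ud in 2D, U in 3D). The E-field term Eq_m is added to gg2,
      then both are mapped to the reference element with invJ, scaled by wj, and contracted
      with the basis functions/gradients to form the element matrix.
    */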
415     // assemble Jacobian (or mass)
416     for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
417       const PetscInt b_Nelem      = elem_offset[num_grids];
418       const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
419       PetscInt       grid         = 0;
420 #if defined(PETSC_HAVE_THREADSAFETY)
421       double         starttime, endtime;
422       starttime                   = MPI_Wtime();
423 #endif
424       while (glb_elem_idx >= elem_offset[grid+1]) grid++;
425       {
426         const PetscInt     loc_Nf  = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
427         const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
428         PetscScalar        *elemMat;
429         const PetscReal    *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
430         PetscCall(PetscMalloc1(elemMatSize, &elemMat));
431         PetscCall(PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat)));
432         if (shift==0.0) { // Jacobian
433           PetscCall(PetscLogEventBegin(ctx->events[4],0,0,0,0));
434         } else {          // mass
435           PetscCall(PetscLogEventBegin(ctx->events[16],0,0,0,0));
436         }
437         for (PetscInt qj = 0; qj < Nq; ++qj) {
438           const PetscInt   jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
439           PetscReal        g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
440           PetscInt         d,d2,dp,d3,IPf_idx;
441           if (shift==0.0) { // Jacobian
442             const PetscReal * const invJj = &invJe[qj*dim*dim];
443             PetscReal               gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
444             const PetscReal         vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
445             // create g2 & g3
446             for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
447               gg2_temp[d] = 0;
448               for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
449             }
450             /* inner beta reduction */
451             IPf_idx = 0;
452             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
453               PetscInt  nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
454               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
455                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
456                   const PetscReal wi       = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
457                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
458 #if LANDAU_DIM==2
459                   PetscReal       Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
460                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
461 #else
462                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
463                   if (ctx->use_relativistic_corrections) {
464                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
465                   } else {
466                     LandauTensor3D(vj, x, y, z, U, mask);
467                   }
468 #endif
469                   for (int f = 0; f < Nfloc_r ; ++f) {
470                     const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi;  // IPf_idx + f*nip_loc_r + loc_fdf_idx;
471                     temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
472                     temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
473 #if LANDAU_DIM==3
474                     temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
475 #endif
476                     temp2    += ff[idx]*nu_beta[f+f_off];
477                   }
478                   temp1[0] *= wi;
479                   temp1[1] *= wi;
480 #if LANDAU_DIM==3
481                   temp1[2] *= wi;
482 #endif
483                   temp2    *= wi;
484 #if LANDAU_DIM==2
485                   for (d2 = 0; d2 < 2; d2++) {
486                     for (d3 = 0; d3 < 2; ++d3) {
487                       /* K = U * grad(f): g2=e: i,A */
488                       gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
489                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
490                       gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
491                     }
492                   }
493 #else
494                   for (d2 = 0; d2 < 3; ++d2) {
495                     for (d3 = 0; d3 < 3; ++d3) {
496                       /* K = U * grad(f): g2 = e: i,A */
497                       gg2_temp[d2] += U[d2][d3]*temp1[d3];
498                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
499                       gg3_temp[d2][d3] += U[d2][d3]*temp2;
500                     }
501                   }
502 #endif
503                 } // qi
504               } // ei_r
505               IPf_idx += nip_loc_r*Nfloc_r;
506             } /* grid_r - IPs */
507             PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb);
508             // add alpha and put in gg2/3
509             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
510               for (d2 = 0; d2 < dim; d2++) {
511                 gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
512                 for (d3 = 0; d3 < dim; d3++) {
513                   gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
514                 }
515               }
516             }
517             /* add electric field term once per IP */
518             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
519               gg2[fieldA][dim-1] += Eq_m[fieldA+f_off];
520             }
521             /* Jacobian transform - g2, g3 */
522             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
523               for (d = 0; d < dim; ++d) {
524                 g2[fieldA][d] = 0.0;
525                 for (d2 = 0; d2 < dim; ++d2) {
526                   g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
527                   g3[fieldA][d][d2] = 0.0;
528                   for (d3 = 0; d3 < dim; ++d3) {
529                     for (dp = 0; dp < dim; ++dp) {
530                       g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
531                     }
532                   }
533                   g3[fieldA][d][d2] *= wj;
534                 }
535                 g2[fieldA][d] *= wj;
536               }
537             }
538           } else { // mass
539             PetscReal wj = ww[jpidx_glb];
540             /* Jacobian transform - g0 */
541             for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
542               if (dim==2) {
543                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
544               } else {
545                 g0[fieldA] = wj * shift; // move this to below and remove g0
546               }
547             }
548           }
549           /* FE matrix construction */
550           {
551             PetscInt  fieldA,d,f,d2,g;
552             const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
553             /* assemble - on the diagonal (I,I) */
554             for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
555               for (f = 0; f < Nb ; f++) {
556                 const PetscInt i = fieldA*Nb + f; /* Element matrix row */
557                 for (g = 0; g < Nb; ++g) {
558                   const PetscInt j    = fieldA*Nb + g; /* Element matrix column */
559                   const PetscInt fOff = i*totDim + j;
560                   if (shift==0.0) {
561                     for (d = 0; d < dim; ++d) {
562                       elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
563                       for (d2 = 0; d2 < dim; ++d2) {
564                         elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
565                       }
566                     }
567                   } else { // mass
568                     elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
569                   }
570                 }
571               }
572             }
573           }
574         } /* qj loop */
575         if (shift==0.0) { // Jacobian
576           PetscCall(PetscLogEventEnd(ctx->events[4],0,0,0,0));
577         } else {
578           PetscCall(PetscLogEventEnd(ctx->events[16],0,0,0,0));
579         }
580 #if defined(PETSC_HAVE_THREADSAFETY)
581         endtime = MPI_Wtime();
582         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
583 #endif
584         /* assemble matrix */
585         if (!container) {
586           PetscInt cStart;
587           PetscCall(PetscLogEventBegin(ctx->events[6],0,0,0,0));
588           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
589           PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES));
590           PetscCall(PetscLogEventEnd(ctx->events[6],0,0,0,0));
591         } else {  // GPU like assembly for debugging
592           PetscInt      fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
593           PetscScalar   vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
594           LandauIdx     *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
595           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
596           for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
597             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
598             for (f = 0; f < Nb ; f++) {
599               PetscInt idx = Idxs[f];
600               if (idx >= 0) {
601                 nr           = 1;
602                 rows0[0]     = idx;
603                 row_scale[0] = 1.;
604               } else {
605                 idx = -idx - 1;
606                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
607                   if (maps[grid].c_maps[idx][q].gid < 0) break;
608                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
609                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
610                 }
611               }
612               for (g = 0; g < Nb; ++g) {
613                 idx = Idxs[g];
614                 if (idx >= 0) {
615                   nc = 1;
616                   cols0[0]     = idx;
617                   col_scale[0] = 1.;
618                 } else {
619                   idx = -idx - 1;
620                   nc = maps[grid].num_face;
621                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
622                     if (maps[grid].c_maps[idx][q].gid < 0) break;
623                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
624                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
625                   }
626                 }
627                 const PetscInt    i   = fieldA*Nb + f; /* Element matrix row */
628                 const PetscInt    j   = fieldA*Nb + g; /* Element matrix column */
629                 const PetscScalar Aij = elemMat[i*totDim + j];
630                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
631                   const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
632                   const int idx0   = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
633                   for (int q = 0, idx2 = idx0; q < nr; q++) {
634                     for (int d = 0; d < nc; d++, idx2++) {
635                       coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij;
636                     }
637                   }
638                 } else {
639                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
640                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
641                   for (q = 0; q < nr; q++) {
642                     for (d = 0; d < nc; d++) {
643                       vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
644                     }
645                   }
646                   PetscCall(MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES));
647                 }
648               }
649             }
650           }
651         }
652         if (loc_elem==-1) {
653           PetscCall(PetscPrintf(ctx->comm,"CPU Element matrix\n"));
654           for (int d = 0; d < totDim; ++d) {
655             for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm," %12.5e",  PetscRealPart(elemMat[d*totDim + f])));
656             PetscCall(PetscPrintf(ctx->comm,"\n"));
657           }
658           exit(12);
659         }
660         PetscCall(PetscFree(elemMat));
661       } /* grid */
662     } /* outer element & batch loop */
663     if (shift==0.0) { // Jacobian: free dynamic data
664       PetscCall(PetscFree4(ff, dudx, dudy, dudz));
665     }
666     if (!container) {   // 'CPU' assembly move nest matrix to global JacP
667       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
668         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
669           const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
670           PetscInt          nloc, nzl, colbuf[1024], row;
671           const PetscInt    *cols;
672           const PetscScalar *vals;
673           Mat               B = subJ[ LAND_PACK_IDX(b_id,grid) ];
674           PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
675           PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
676           PetscCall(MatGetSize(B, &nloc, NULL));
677           for (int i=0 ; i<nloc ; i++) {
678             PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
679             PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
680             for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
681             row  = moffset + i;
682             PetscCall(MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES));
683             PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
684           }
685           PetscCall(MatDestroy(&B));
686         }
687       }
688     }
689     if (coo_vals) {
690       PetscCall(MatSetValuesCOO(JacP,coo_vals,ADD_VALUES));
691       PetscCall(PetscFree(coo_vals));
692     }
693   } /* CPU version */
694   PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
695   PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
696   /* clean up */
697   if (cellClosure) {
698     PetscCall(PetscFree(cellClosure));
699   }
700   if (xdata) {
701     PetscCall(VecRestoreArrayReadAndMemType(a_X,&xdata));
702   }
703   PetscFunctionReturn(0);
704 }
705 
706 #if defined(LANDAU_ADD_BCS)
707 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
708                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
709                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
710                     PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
711 {
712   uexact[0] = 0;
713 }
714 #endif
715 
716 #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
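/* CircleInflate - map a point (x,y) of the quadrilateral base mesh onto the inflated circular
   mesh: points with r < r1 are left unchanged; otherwise the point is rotated into the
   reference sector, pushed out toward the circle of radius r0, and then graded radially
   (power-law) between r1, r2 and r0. */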
717 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
718                           PetscReal *outX, PetscReal *outY)
719 {
720   PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
721   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
722     *outX = x; *outY = y;
723   } else {
724     const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
725     PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
726     if (num_sections==2) {
727       rotcos  = 0.70710678118654;
728       outfact = 1.5; efact = 2.5;
729       /* rotate normalized vector into [-pi/4,pi/4) */
730       if (sinphi >= 0.) {         /* top cell, -pi/4 */
731         cth = 0.707106781186548; sth = -0.707106781186548;
732       } else {                    /* bottom cell +pi/4 */
733         cth = 0.707106781186548; sth = .707106781186548;
734       }
735     } else if (num_sections==3) {
736       rotcos  = 0.86602540378443;
737       outfact = 1.5; efact = 2.5;
738       /* rotate normalized vector into [-pi/6,pi/6) */
739       if (sinphi >= 0.5) {         /* top cell, -pi/3 */
740         cth = 0.5; sth = -0.866025403784439;
741       } else if (sinphi >= -.5) {  /* mid cell 0 */
742         cth = 1.; sth = .0;
743       } else { /* bottom cell +pi/3 */
744         cth = 0.5; sth = 0.866025403784439;
745       }
746     } else if (num_sections==4) {
747       rotcos  = 0.9238795325112;
748       outfact = 1.5; efact = 3;
749       /* rotate normalized vector into [-pi/8,pi/8) */
750       if (sinphi >= 0.707106781186548) {         /* top cell, -3pi/8 */
751         cth = 0.38268343236509;  sth = -0.923879532511287;
752       } else if (sinphi >= 0.) {                 /* mid top cell -pi/8 */
753         cth = 0.923879532511287; sth = -.38268343236509;
754       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
755         cth = 0.923879532511287; sth = 0.38268343236509;
756       } else {                                   /* bottom cell + 3pi/8 */
757         cth = 0.38268343236509;  sth = .923879532511287;
758       }
759     } else {
760       cth = 0.; sth = 0.; rotcos = 0; efact = 0;
761     }
762     Rth[0][0] = cth; Rth[0][1] =-sth;
763     Rth[1][0] = sth; Rth[1][1] = cth;
764     MATVEC2(Rth,xy,xyprime);
765     if (num_sections==2) {
766       newrr = xyprime[0]/rotcos;
767     } else {
768       PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
769       PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
770       newrr = rin + routfrac*nroutmax;
771     }
772     *outX = cosphi*newrr; *outY = sinphi*newrr;
773     /* grade */
774     PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
775     if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
776     else {         rs = r1; re = r2; fact = efact;} /* electron zone */
777     tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
778     *outX *= tt;
779     *outY *= tt;
780   }
781 }
782 
783 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
784 {
785   LandauCtx   *ctx = (LandauCtx*)a_ctx;
786   PetscReal   r = abc[0], z = abc[1];
787   if (ctx->inflate) {
788     PetscReal absR, absZ;
789     absR = PetscAbs(r);
790     absZ = PetscAbs(z);
791     CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
792     r = (r > 0) ? absR : -absR;
793     z = (z > 0) ? absZ : -absZ;
794   }
795   xyz[0] = r;
796   xyz[1] = z;
797   if (dim==3) xyz[2] = abc[2];
798 
799   PetscFunctionReturn(0);
800 }
801 
802 /* create DMComposite of meshes for each species group */
803 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
804 {
805   PetscFunctionBegin;
806   { /* p4est, quads */
807     /* Create plex mesh of Landau domain */
808     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
809       PetscReal radius = ctx->radius[grid];
810       if (!ctx->sphere) {
811         PetscInt       cells[] = {2,2,2};
812         PetscReal      lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
813         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
814         if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
815         PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
816         PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */
817         if (dim==3) PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "cube"));
818         else PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane"));
819       } else if (dim==2) { // sphere is all wrong. should just have one inner radius
820         PetscInt       numCells,cells[16][4],i,j;
821         PetscInt       numVerts;
822         PetscReal      inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
823         PetscReal      *flatCoords   = NULL;
824         PetscInt       *flatCells    = NULL, *pcell;
825         if (ctx->num_sections==2) {
826 #if 1
827           numCells = 5;
828           numVerts = 10;
829           int cells2[][4] = { {0,1,4,3},
830                               {1,2,5,4},
831                               {3,4,7,6},
832                               {4,5,8,7},
833                               {6,7,8,9} };
834           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
835           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
836           {
837             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
838             for (j = 0; j < numVerts-1; j++) {
839               PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
840               PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
841               z = rad * PetscSinReal(theta);
842               coords[j][1] = z;
843               r = rad * PetscCosReal(theta);
844               coords[j][0] = r;
845             }
846             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
847           }
848 #else
849           numCells = 4;
850           numVerts = 8;
851           static int     cells2[][4] = {{0,1,2,3},
852                                         {4,5,1,0},
853                                         {5,6,2,1},
854                                         {6,7,3,2}};
855           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
856           PetscCall(loc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
857           {
858             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
859             PetscInt j;
860             for (j = 0; j < 8; j++) {
861               PetscReal z, r;
862               PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
863               PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
864               z = rad * PetscSinReal(theta);
865               coords[j][1] = z;
866               r = rad * PetscCosReal(theta);
867               coords[j][0] = r;
868             }
869           }
870 #endif
871         } else if (ctx->num_sections==3) {
872           numCells = 7;
873           numVerts = 12;
874           int cells2[][4] = { {0,1,5,4},
875                               {1,2,6,5},
876                               {2,3,7,6},
877                               {4,5,9,8},
878                               {5,6,10,9},
879                               {6,7,11,10},
880                               {8,9,10,11} };
881           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
882           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
883           {
884             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
885             for (j = 0; j < numVerts; j++) {
886               PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
887               PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
888               z = rad * PetscSinReal(theta);
889               coords[j][1] = z;
890               r = rad * PetscCosReal(theta);
891               coords[j][0] = r;
892             }
893           }
894         } else if (ctx->num_sections==4) {
895           numCells = 10;
896           numVerts = 16;
897           int cells2[][4] = { {0,1,6,5},
898                               {1,2,7,6},
899                               {2,3,8,7},
900                               {3,4,9,8},
901                               {5,6,11,10},
902                               {6,7,12,11},
903                               {7,8,13,12},
904                               {8,9,14,13},
905                               {10,11,12,15},
906                               {12,13,14,15}};
907           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
908           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
909           {
910             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
911             for (j = 0; j < numVerts-1; j++) {
912               PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
913               PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
914               z = rad * PetscSinReal(theta);
915               coords[j][1] = z;
916               r = rad * PetscCosReal(theta);
917               coords[j][0] = r;
918             }
919             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
920           }
921         } else {
922           numCells = 0;
923           numVerts = 0;
924         }
925         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
926           pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
927           pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
928         }
929         PetscCall(DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]));
930         PetscCall(PetscFree2(flatCoords,flatCells));
931         PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle"));
932       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support cubed sphere");
933 
934       PetscCall(DMSetFromOptions(ctx->plex[grid]));
935     } // grid loop
936     PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack,prefix));
937     PetscCall(DMSetFromOptions(pack));
938 
939     { /* convert to p4est (or whatever), wait for discretization to create pack */
940       char           convType[256];
941       PetscBool      flg;
942       PetscErrorCode ierr;
943 
944       ierr = PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");PetscCall(ierr);
945       PetscCall(PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg));
946       ierr = PetscOptionsEnd();PetscCall(ierr);
947       if (flg) {
948         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
949         for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
950           DM dmforest;
951           PetscCall(DMConvert(ctx->plex[grid],convType,&dmforest));
952           if (dmforest) {
953             PetscBool isForest;
954             PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix));
955             PetscCall(DMIsForest(dmforest,&isForest));
956             if (isForest) {
957               if (ctx->sphere && ctx->inflate) {
958                 PetscCall(DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx));
959               }
960               PetscCall(DMDestroy(&ctx->plex[grid]));
961               ctx->plex[grid] = dmforest; // Forest for adaptivity
962             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
963           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
964         }
965       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
966     }
967   } /* non-file */
968   PetscCall(DMSetDimension(pack, dim));
969   PetscCall(PetscObjectSetName((PetscObject) pack, "Mesh"));
970   PetscCall(DMSetApplicationContext(pack, ctx));
971 
972   PetscFunctionReturn(0);
973 }
974 
975 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
976 {
977   PetscInt        ii,i0;
978   char            buf[256];
979   PetscSection    section;
980 
981   PetscFunctionBegin;
982   for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
983     if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
984     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
985     /* Setup Discretization - FEM */
986     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]));
987     PetscCall(PetscObjectSetName((PetscObject) ctx->fe[ii], buf));
988     PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]));
989   }
990   PetscCall(DMCreateDS(ctx->plex[grid]));
991   PetscCall(DMGetSection(ctx->plex[grid], &section));
992   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
993     if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
994     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
995     PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
996   }
997   PetscFunctionReturn(0);
998 }
999 
1000 /* Define a Maxwellian function for testing out the operator. */
1001 
1002 /* Using cartesian velocity space coordinates, the particle */
1003 /* density, [1/m^3], is defined according to */
1004 
1005 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
1006 
1007 /* Using some constant, c, we normalize the velocity vector into a */
1008 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
1009 
1010 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
1011 
1012 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
1013 /* for finding the particle within the interval in a box dx^3 around x is */
1014 
1015 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
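/* In the code below v_0 plays the role of the normalization constant c, kT_m = kT/m, and */
/* theta = 2*kT_m/v_0^2, matching the normalized form above (see maxwellian()). */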
1016 
1017 typedef struct {
1018   PetscReal v_0;
1019   PetscReal kT_m;
1020   PetscReal n;
1021   PetscReal shift;
1022 } MaxwellianCtx;
1023 
1024 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
1025 {
1026   MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
1027   PetscInt      i;
1028   PetscReal     v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
1029   PetscFunctionBegin;
1030   /* compute the exponents, v^2 */
1031   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
1032   /* evaluate the Maxwellian */
1033   u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1034   if (mctx->shift!=0.) {
1035     v2 = 0;
1036     for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
1037     v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
1038     /* evaluate the shifted Maxwellian */
1039     u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1040   }
1041   PetscFunctionReturn(0);
1042 }
1043 
1044 /*@
1045  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
1046 
1047  Collective on X
1048 
1049  Input Parameters:
1050 +   dm - The mesh (local)
1051 .   time - Current time
1052 .   temps - Temperatures of each species (global)
1053 .   ns - Number density of each species (global)
1054 .   grid - index into current grid - just used for offset into temps and ns
1055 -   actx - Landau context
1056 
1057  Output Parameter:
1058  .   X  - The state (local to this grid)
1059 
1060  Level: beginner
1061 
1062  .keywords: mesh
1063  .seealso: DMPlexLandauCreateVelocitySpace()
1064  @*/
1065 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx)
1066 {
1067   LandauCtx      *ctx = (LandauCtx*)actx;
1068   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
1069   PetscInt       dim;
1070   MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1071 
1072   PetscFunctionBegin;
1073   PetscCall(DMGetDimension(dm, &dim));
1074   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1075   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1076     mctxs[i0]      = &data[i0];
1077     data[i0].v_0   = ctx->v_0; // v_0 same for all grids
1078     data[i0].kT_m  = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1079     data[i0].n     = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] use for Conner-Hastie
1080     initu[i0]      = maxwellian;
1081     data[i0].shift = 0;
1082   }
1083   data[0].shift = ctx->electronShift;
1084   /* need to make ADD_ALL_VALUES work - TODO */
1085   PetscCall(DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X));
1086   PetscFunctionReturn(0);
1087 }
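/*
  A minimal usage sketch (hypothetical standalone example; the grid DM, temperatures and
  densities are assumed to come from a LandauCtx created by DMPlexLandauCreateVelocitySpace()):

    Vec Xg;
    PetscCall(DMCreateGlobalVector(ctx->plex[0], &Xg));
    PetscCall(VecZeroEntries(Xg));
    PetscCall(DMPlexLandauAddMaxwellians(ctx->plex[0], Xg, 0.0, ctx->thermal_temps, ctx->n, 0, 0, ctx));
    PetscCall(VecDestroy(&Xg));
*/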
1088 
1089 /*
1090  LandauSetInitialCondition - Adds Maxwellians with context
1091 
1092  Collective on X
1093 
1094  Input Parameters:
1095 +   dm - The mesh
1096 .   grid - index into current grid - just used for offset into temps and ns
1097 -   actx - Landau context with T and n
1098 
1099  Output Parameter:
1100  .   X  - The state
1101 
1102  Level: beginner
1103 
1104  .keywords: mesh
1105  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauAddMaxwellians()
1106  */
1107 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx)
1108 {
1109   LandauCtx        *ctx = (LandauCtx*)actx;
1110   PetscFunctionBegin;
1111   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1112   PetscCall(VecZeroEntries(X));
1113   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx));
1114   PetscFunctionReturn(0);
1115 }
1116 
1117 // adapt a level once. Forest in/out
1118 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1119 {
1120   DM               forest, plex, adaptedDM = NULL;
1121   PetscDS          prob;
1122   PetscBool        isForest;
1123   PetscQuadrature  quad;
1124   PetscInt         Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1125   DMLabel          adaptLabel = NULL;
1126 
1127   PetscFunctionBegin;
1128   forest = ctx->plex[grid];
1129   PetscCall(DMCreateDS(forest));
1130   PetscCall(DMGetDS(forest, &prob));
1131   PetscCall(DMGetDimension(forest, &dim));
1132   PetscCall(DMIsForest(forest, &isForest));
  PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"DM is not a forest");
1134   PetscCall(DMConvert(forest, DMPLEX, &plex));
1135   PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1136   PetscCall(DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel));
1137   PetscCall(PetscFEGetQuadrature(fem, &quad));
1138   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
1139   PetscCheckFalse(Nq >LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
1140   PetscCall(PetscDSGetDimensions(prob, &Nb));
1141   if (type==4) {
1142     for (c = cStart; c < cEnd; c++) {
1143       PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1144     }
1145     PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM"));
1146   } else if (type==2) {
    PetscInt  rCellIdx[8] = {-1,-1,-1,-1,-1,-1,-1,-1}, eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1148     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1149     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1150     for (c = cStart; c < cEnd; c++) {
1151       PetscReal    tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1152       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
1153       for (qj = 0; qj < Nq; ++qj) {
1154         tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1155         r  = PetscSqrtReal(tt);
1156         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1157           minRad = r;
1158           nr     = 0;
1159           rCellIdx[nr++]= c;
1160           PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", r, c, qj+1, Nq));
1161         } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1162           for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1163           if (k==nr) {
1164             rCellIdx[nr++]= c;
1165             PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", r, c, qj+1, Nq, r-minRad));
1166           }
1167         }
1168         if (ctx->sphere) {
1169           if ((tt=r-ctx->e_radius) > 0) {
1170             PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,tt));
1171             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1172               eMinRad = tt;
1173               eMaxIdx = 0;
1174               eCellIdx[eMaxIdx++] = c;
1175             } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1176               eCellIdx[eMaxIdx++] = c;
1177             }
1178           }
1179           if ((tt=r-ctx->i_radius[grid]) > 0) {
1180             if (tt < iMinRad - 1.e-5) {
1181               iMinRad = tt;
1182               iMaxIdx = 0;
1183               iCellIdx[iMaxIdx++] = c;
1184             } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1185               iCellIdx[iMaxIdx++] = c;
1186             }
1187           }
1188         }
1189       }
1190     }
1191     for (k=0;k<nr;k++) {
1192       PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
1193     }
1194     if (ctx->sphere) {
1195       for (c = 0; c < eMaxIdx; c++) {
1196         PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE));
1197         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad));
1198       }
1199       for (c = 0; c < iMaxIdx; c++) {
1200         PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE));
1201         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad));
1202       }
1203     }
1204     PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad));
1205   } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1206     PetscScalar  *coef = NULL;
1207     Vec          coords;
1208     PetscInt     csize,Nv,d,nz;
1209     DM           cdm;
1210     PetscSection cs;
1211     PetscCall(DMGetCoordinatesLocal(forest, &coords));
1212     PetscCall(DMGetCoordinateDM(forest, &cdm));
1213     PetscCall(DMGetLocalSection(cdm, &cs));
1214     for (c = cStart; c < cEnd; c++) {
1215       PetscInt doit = 0, outside = 0;
1216       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
1217       Nv = csize/dim;
1218       for (nz = d = 0; d < Nv; d++) {
1219         PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1220         x = PetscSqrtReal(x);
1221         if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;             /* refine origin */
1222         else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++;   /* first pass don't refine bottom */
1223         else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1224         else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1225         if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1226       }
1227       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
1228       if (doit || (outside<Nv && nz)) {
1229         PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1230       }
1231     }
1232     PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM"));
1233   }
1234   PetscCall(DMDestroy(&plex));
1235   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
1236   PetscCall(DMLabelDestroy(&adaptLabel));
1237   *newForest = adaptedDM;
1238   if (adaptedDM) {
1239     if (isForest) {
      PetscCall(DMForestSetAdaptivityForest(adaptedDM,NULL)); // detach the adapted forest from its source so the old forest can be destroyed
    } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Adapted DM is not a forest"); // should not happen: isForest was checked above
1242     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
1243     PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1244     PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart)));
1245     PetscCall(DMDestroy(&plex));
1246   } else *newForest = NULL;
1247   PetscFunctionReturn(0);
1248 }
1249 
1250 // forest goes in (ctx->plex[grid]), plex comes out
1251 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1252 {
1253   PetscInt        adaptIter;
1254 
1255   PetscFunctionBegin;
1256   PetscInt  type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
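  // Apply each refinement type limits[type] times, in order: runaway-electron axis (0),
  // first v_perp=0 axis pass (1), AMR at the origin (2), second v_perp=0 axis pass (3),
  // then uniform post-AMR refinement (4); types 0, 1, and 3 act only on grid 0 (electrons).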
1257   for (type=0;type<5;type++) {
1258     for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1259       DM  newForest = NULL;
1260       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
1261       if (newForest)  {
1262         PetscCall(DMDestroy(&ctx->plex[grid]));
1263         PetscCall(VecDestroy(uu));
1264         PetscCall(DMCreateGlobalVector(newForest,uu));
1265         PetscCall(PetscObjectSetName((PetscObject) *uu, "uAMR"));
1266         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, ctx));
1267         ctx->plex[grid] = newForest;
1268       } else {
        SETERRQ(ctx->comm, PETSC_ERR_PLIB, "adaptToleranceFEM returned no forest; can happen with no AMR and post refinement");
1270       }
1271     }
1272   }
1273   PetscFunctionReturn(0);
1274 }
1275 
1276 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1277 {
1278   PetscErrorCode    ierr;
1279   PetscBool         flg, sph_flg;
1280   PetscInt          ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1281   PetscReal         v0_grid[LANDAU_MAX_GRIDS];
1282   DM                dummy;
1283 
1284   PetscFunctionBegin;
1285   PetscCall(DMCreate(ctx->comm,&dummy));
1286   /* get options - initialize context */
  ctx->verbose = 1; // should default to 0 for silent operation
1288 #if defined(PETSC_HAVE_THREADSAFETY)
1289   ctx->batch_sz = PetscNumOMPThreads;
1290 #else
1291   ctx->batch_sz = 1;
1292 #endif
1293   ctx->batch_view_idx = 0;
1294   ctx->interpolate    = PETSC_TRUE;
1295   ctx->gpu_assembly   = PETSC_TRUE;
1296   ctx->aux_bool       = PETSC_FALSE;
1297   ctx->electronShift  = 0;
1298   ctx->M              = NULL;
1299   ctx->J              = NULL;
1300   /* geometry and grids */
1301   ctx->sphere         = PETSC_FALSE;
1302   ctx->inflate        = PETSC_FALSE;
1303   ctx->aux_bool       = PETSC_FALSE;
1304   ctx->use_p4est      = PETSC_FALSE;
1305   ctx->num_sections   = 3; /* 2, 3 or 4 */
1306   for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1307     ctx->radius[grid]           = 5.; /* thermal radius (velocity) */
1308     ctx->numAMRRefine[grid]     = 5;
1309     ctx->postAMRRefine[grid]    = 0;
1310     ctx->species_offset[grid+1] = 1; // one species default
1311     num_species_grid[grid]      = 0;
1312     ctx->plex[grid] = NULL;     /* cache as expensive to Convert */
1313   }
1314   ctx->species_offset[0] = 0;
1315   ctx->re_radius         = 0.;
1316   ctx->vperp0_radius1    = 0;
1317   ctx->vperp0_radius2    = 0;
1318   ctx->nZRefine1         = 0;
1319   ctx->nZRefine2         = 0;
1320   ctx->numRERefine       = 0;
1321   num_species_grid[0]    = 1; // one species default
  /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1323   ctx->charges[0]        = -1;  /* electron charge (MKS) */
  ctx->masses[0]         = 1/1835.469965278441013; /* electron mass in units of proton mass (reset to kg below) */
1325   ctx->n[0]              = 1;
1326   ctx->v_0               = 1; /* thermal velocity, we could start with a scale != 1 */
1327   ctx->thermal_temps[0]  = 1;
1328   /* constants, etc. */
1329   ctx->epsilon0          = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1330   ctx->k                 = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
  ctx->lnLam             = 10;         /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
1332   ctx->n_0               = 1.e20;        /* typical plasma n, but could set it to 1 */
1333   ctx->Ez                = 0;
1334   for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
1335   ctx->use_matrix_mass   =  PETSC_FALSE;
1336   ctx->use_relativistic_corrections = PETSC_FALSE;
1337   ctx->use_energy_tensor_trick      = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1338   ctx->SData_d.w         = NULL;
1339   ctx->SData_d.x         = NULL;
1340   ctx->SData_d.y         = NULL;
1341   ctx->SData_d.z         = NULL;
1342   ctx->SData_d.invJ      = NULL;
1343   ctx->jacobian_field_major_order     = PETSC_FALSE;
1344   ctx->SData_d.coo_elem_offsets       = NULL;
1345   ctx->SData_d.coo_elem_point_offsets = NULL;
1346   ctx->coo_assembly                   = PETSC_FALSE;
1347   ctx->SData_d.coo_elem_fullNb        = NULL;
1348   ctx->SData_d.coo_size               = 0;
1349   ierr = PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");PetscCall(ierr);
1350   {
1351     char opstring[256];
1352 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1353     ctx->deviceType = LANDAU_KOKKOS;
1354     PetscCall(PetscStrcpy(opstring,"kokkos"));
1355 #elif defined(PETSC_HAVE_CUDA)
1356     ctx->deviceType = LANDAU_CUDA;
1357     PetscCall(PetscStrcpy(opstring,"cuda"));
1358 #else
1359     ctx->deviceType = LANDAU_CPU;
1360     PetscCall(PetscStrcpy(opstring,"cpu"));
1361 #endif
1362     PetscCall(PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL));
1363     PetscCall(PetscStrcmp("cpu",opstring,&flg));
1364     if (flg) {
1365       ctx->deviceType = LANDAU_CPU;
1366     } else {
1367       PetscCall(PetscStrcmp("cuda",opstring,&flg));
1368       if (flg) {
1369         ctx->deviceType = LANDAU_CUDA;
1370       } else {
1371         PetscCall(PetscStrcmp("kokkos",opstring,&flg));
1372         if (flg) ctx->deviceType = LANDAU_KOKKOS;
        else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"unknown -dm_landau_device_type %s",opstring);
1374       }
1375     }
1376   }
1377   PetscCall(PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL));
1378   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1379   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1380   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,LANDAU_MAX_BATCH_SZ,ctx->batch_sz);
1381   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1382   PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz);
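  // A 'batch' is a set of independent velocity-space systems (e.g., the spatial vertices of a
  // host application) that are assembled and solved together as one block-diagonal system.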
  PetscCall(PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of the Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL));
1384   PetscCall(PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL));
1385   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL));
1386   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1387   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1388   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL));
1389 
1390   /* get num species with temperature, set defaults */
1391   for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1392     ctx->thermal_temps[ii] = 1;
1393     ctx->charges[ii]       = 1;
1394     ctx->masses[ii]        = 1;
1395     ctx->n[ii]             = 1;
1396   }
1397   nt = LANDAU_MAX_SPECIES;
1398   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1399   if (flg) {
1400     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt));
1401     ctx->num_species = nt;
  } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1403   for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1404   nm = LANDAU_MAX_SPECIES-1;
1405   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1406   if (flg && nm != ctx->num_species-1) {
1407     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species-1);
1408   }
1409   nm = LANDAU_MAX_SPECIES;
1410   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1411   PetscCheckFalse(flg && nm != ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species);
1412   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1413   ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1414   ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1415   nc = LANDAU_MAX_SPECIES-1;
1416   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1417   if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1);
1418   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1419   /* geometry and grids */
1420   nt = LANDAU_MAX_GRIDS;
1421   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg));
1422   if (flg) {
1423     ctx->num_grids = nt;
1424     for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1425     PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %" PetscInt_FMT ")",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS);
1426   } else {
1427     ctx->num_grids = 1; // go back to a single grid run
1428     num_species_grid[0] = ctx->num_species;
1429   }
1430   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
  PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT,ctx->species_offset[ctx->num_grids],ctx->num_species);
1432   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1433     int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1434     v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1435   }
1436   ii = 0;
1437   PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL));
1438   ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1439   ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
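  /* i.e., t_0 = 8 pi (eps0 m_0 / q_e^2)^2 v_0^3 / (ln(Lambda) n_0), the reference (0,0) collision time */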
1440   /* domain */
1441   nt = LANDAU_MAX_GRIDS;
1442   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg));
1443   if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1444   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1445     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1446       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1447       else ctx->radius[grid] = -ctx->radius[grid];
1448       ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1449       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",ctx->radius[grid],grid));
1450     }
1451     ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1452   }
  /* AMR parameters */
1454   nt = LANDAU_MAX_GRIDS;
1455   PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1456   PetscCheckFalse(flg && nt < ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1457   nt = LANDAU_MAX_GRIDS;
1458   PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1459   for (ii=1;ii<ctx->num_grids;ii++)  ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1460   PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1461   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1462   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1463   PetscCall(PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg));
1464   PetscCall(PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg));
1465   PetscCall(PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1466   /* spherical domain (not used) */
  PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL));
1468   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg));
1469   PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg));
1470   PetscCall(PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg));
  if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an electron radius was given without -dm_landau_sphere; assume a sphere grid was intended */
1472   if (!flg) {
1473     ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1474   }
1475   nt = LANDAU_MAX_GRIDS;
1476   PetscCall(PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg));
1477   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1478   if (!flg) {
1479     ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1480   }
  if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_grids = %" PetscInt_FMT,nt,ctx->num_grids);
1482   if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius[0],ctx->e_radius,ctx->radius[0]);
1483   /* processing options */
1484   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
  if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is provided by the Kokkos back-end, which also serves the 'cpu' device type
1486     PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL));
1487     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d",ctx->coo_assembly);
1488   }
1489   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1490   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1491   ierr = PetscOptionsEnd();PetscCall(ierr);
1492 
1493   for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii]  = ctx->charges[ii] = 0;
1494   if (ctx->verbose > 0) {
1495     PetscCall(PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0));
1496     PetscCall(PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0));
1497     PetscCall(PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", ctx->n[0],ctx->n[1],ctx->num_species>2 ? ctx->n[2] : 0));
1498     PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz));
1499     PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",0,ctx->radius[0],ctx->numAMRRefine[0]));
1500     for (ii=1;ii<ctx->num_grids;ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,ctx->radius[ii],ctx->numAMRRefine[ii]));
1501     PetscCall(PetscPrintf(ctx->comm,"\n"));
1502     if (ctx->jacobian_field_major_order) {
1503       PetscCall(PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n"));
1504     } else {
1505       PetscCall(PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n"));
1506     }
1507   }
1508   PetscCall(DMDestroy(&dummy));
1509   {
1510     PetscMPIInt    rank;
1511     PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank));
1512     ctx->stage = 0;
1513     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */
1514     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */
1515     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */
1516     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */
1517     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1518     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1519     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */
1520     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */
1521     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */
1522     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */
1523     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */
1524     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */
1525     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */
1526     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */
1527     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1528     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */
1529     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */
1530 
1531     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1532       PetscCall(PetscOptionsClearValue(NULL,"-snes_converged_reason"));
1533       PetscCall(PetscOptionsClearValue(NULL,"-ksp_converged_reason"));
1534       PetscCall(PetscOptionsClearValue(NULL,"-snes_monitor"));
1535       PetscCall(PetscOptionsClearValue(NULL,"-ksp_monitor"));
1536       PetscCall(PetscOptionsClearValue(NULL,"-ts_monitor"));
1537       PetscCall(PetscOptionsClearValue(NULL,"-ts_view"));
1538       PetscCall(PetscOptionsClearValue(NULL,"-ts_adapt_monitor"));
1539       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view"));
1540       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view"));
1541       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view"));
1542       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_view"));
1543       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view"));
1544       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mat_view"));
1545       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason"));
1546       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor"));
1548       PetscCall(PetscOptionsClearValue(NULL,"-info"));
1549     }
1550   }
1551   PetscFunctionReturn(0);
1552 }
1553 
1554 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1555 {
1556   PetscSection      section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
1557   PetscQuadrature   quad;
1558   const PetscReal   *quadWeights;
1559   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0;
1560   PetscTabulation   *Tf;
1561   PetscDS           prob;
1562 
1563   PetscFunctionBegin;
1564   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
1565   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
  /* DS, tabulation, and quadrature are the same on all grids */
1567   PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1568   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1569   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL,  &quadWeights));
1570   PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ);
1571   /* setup each grid */
1572   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1573     PetscInt cStart, cEnd;
1574     PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1575     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1576     numCells[grid] = cEnd - cStart; // grids can have different topology
1577     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1578     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1579     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1580     ncellsTot += numCells[grid];
1581   }
1582 #define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
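/* MAP_BF_SIZE bounds the scratch array of constrained-point interpolation rows (pointMaps)
   accumulated per grid before being compressed into maps[grid].c_maps below */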
1583   /* create GPU assembly data */
1584   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1585     PetscContainer          container;
1586     PetscScalar             elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
1587     pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
1588     P4estVertexMaps         *maps;
    const PetscInt          *plex_batch=NULL,Nb=Nq; // Nb == Nq for tensor elements
1590     LandauIdx               *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL;
    /* create GPU assembly maps */
1592     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1));
1593     PetscCall(PetscLogEventBegin(ctx->events[2],0,0,0,0));
1594     PetscCall(PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps));
1595 
1596     if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d
1597       PetscCall(PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1598       coo_elem_offsets[0] = 0; // finish later
1599       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot));
1600       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1601       ctx->SData_d.coo_elem_offsets       = (void*)coo_elem_offsets;
1602       ctx->SData_d.coo_elem_fullNb        = (void*)coo_elem_fullNb;
1603       ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets;
1604     } else {
1605       ctx->SData_d.coo_elem_offsets       = ctx->SData_d.coo_elem_fullNb = NULL;
1606       ctx->SData_d.coo_elem_point_offsets = NULL;
1607       ctx->SData_d.coo_n_cellsTot         = 0;
1608     }
1609 
1610     ctx->SData_d.coo_max_fullnb = 0;
1611     for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1612       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq;
1613       if (grid_batch_is_inv[grid]) {
1614         PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1615       }
1616       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1617       // make maps
1618       maps[grid].d_self       = NULL;
1619       maps[grid].num_elements = numCells[grid];
1620       maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
1621       maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
1622       maps[grid].num_reduced  = 0;
1623       maps[grid].deviceType   = ctx->deviceType;
1624       maps[grid].numgrids     = ctx->num_grids;
1625       // count reduced and get
1626       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
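      // gIdx[elem][field][q] holds the global index of each element point: a non-negative
      // entry is an ordinary vertex index, while a negative entry encodes a constrained
      // (hanging) point as id = -(idx+1), where idx indexes c_maps[], the (scale, gid)
      // pairs of the face points it interpolates from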
1627       for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1628         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1629         for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1630           int fullNb = 0;
1631           for (int q = 0; q < Nb; ++q) {
1632             PetscInt    numindices,*indices;
1633             PetscScalar *valuesOrig = elMat = elemMatrix;
1634             PetscCall(PetscArrayzero(elMat, totDim*totDim));
1635             elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
1636             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1637             for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
1638               if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
1639                 // found it
1640                 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1641                   if (plex_batch) {
1642                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]];
1643                   } else {
1644                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1645                   }
1646                   fullNb++;
1647                 } else { //found a constraint
1648                   int       jj      = 0;
1649                   PetscReal sum     = 0;
1650                   const PetscInt ff = f;
1651                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1652 
1653                   do {  // constraints are continuous in Plex - exploit that here
1654                     int ii; // get 'scale'
1655                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1656                       if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1657                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
1658                       }
1659                     }
1660                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1661                     // get 'gid'
1662                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1663                     else {
1664                       if (plex_batch) {
1665                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1666                       } else {
1667                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1668                       }
1669                       fullNb++;
1670                     }
1671                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1672                   while (jj < maps[grid].num_face) {
1673                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1674                     pointMaps[maps[grid].num_reduced][jj].gid = -1;
1675                     jj++;
1676                   }
1677                   if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
1678                     int       d,f;
1679                     PetscReal tmp = 0;
1680                     PetscCall(PetscPrintf(PETSC_COMM_SELF,"\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face));
1681                     for (d = 0, tmp = 0; d < numindices; ++d) {
1682                       if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD,"%3" PetscInt_FMT ") %3" PetscInt_FMT ": ",d,indices[d]));
1683                       for (f = 0; f < numindices; ++f) {
1684                         tmp += PetscRealPart(elMat[d*numindices + f]);
1685                       }
1686                       if (tmp!=0) PetscCall(PetscPrintf(ctx->comm," | %22.16e\n",tmp));
1687                     }
1688                   }
1689                   maps[grid].num_reduced++;
1690                   PetscCheckFalse(maps[grid].num_reduced>=MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %d",maps[grid].num_reduced,MAP_BF_SIZE);
1691                 }
1692                 break;
1693               }
1694             }
1695             // cleanup
1696             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1697             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat));
1698           }
1699           if (ctx->coo_assembly) { // setup COO assembly
1700             coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid
1701             if (fieldA==0) { // cache full Nb for this element, on this grid per species
1702               coo_elem_fullNb[glb_elem_idx] = fullNb;
1703               if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1704             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT,coo_elem_fullNb[glb_elem_idx],fullNb);
1705           }
1706         } // field
1707       } // cell
1708       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1709       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1710       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1711         for (int q = 0; q < maps[grid].num_face; ++q) {
1712           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1713           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1714         }
1715       }
1716 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1717       if (ctx->deviceType == LANDAU_KOKKOS) {
        PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // Kokkos builds its device copy of the maps
1719       } // else could be CUDA
1720 #endif
1721 #if defined(PETSC_HAVE_CUDA)
1722       if (ctx->deviceType == LANDAU_CUDA) {
1723         PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid));
1724       }
1725 #endif
1726       if (plex_batch) {
1727         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1728         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1729       }
1730     } /* grids */
1731     // finish COO
1732     if (ctx->coo_assembly) { // setup COO assembly
1733       PetscInt *oor, *ooc;
1734       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz;
1735       PetscCall(PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc));
1736       for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1;
1737       // get
1738       for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1739         for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1740           const int              fullNb = coo_elem_fullNb[glb_elem_idx];
          const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should be the same across fields, and this is only for COO storage
1742           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1743           for (int f=0, cnt2=0;f<Nb;f++) {
1744             int idx = Idxs[f];
1745             coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1746             if (idx >= 0) {
1747               cnt2++;
1748               coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1749             } else {
1750               idx = -idx - 1;
1751               for (int q = 0 ; q < maps[grid].num_face; q++) {
1752                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1753                 cnt2++;
1754                 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1755               }
1756             }
            PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d > %d",cnt2,fullNb);
1758           }
1759           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb);
1760         }
1761       }
1762       // set
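      // COO layout: entries for batch b_id start at b_id*coo_elem_offsets[ncellsTot]; within
      // an element each field gets a contiguous fullNb*fullNb block, and a point's rows and
      // columns are located with the per-point offsets coo_elem_point_offsets[] built above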
1763       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
1764         for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1765           const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
1766           for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1767             const int  fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
1768             // set (i,j)
1769             for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1770               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1771               int                    rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
1772               for (int f = 0; f < Nb; ++f) {
1773                 const int nr =  coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f];
1774                 if (nr==1) rows[0] = Idxs[f];
1775                 else {
1776                   const int idx = -Idxs[f] - 1;
1777                   for (int q = 0; q < nr; q++) {
1778                     rows[q] = maps[grid].c_maps[idx][q].gid;
1779                   }
1780                 }
1781                 for (int g = 0; g < Nb; ++g) {
1782                   const int nc =  coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g];
1783                   if (nc==1) cols[0] = Idxs[g];
1784                   else {
1785                     const int idx = -Idxs[g] - 1;
1786                     for (int q = 0; q < nc; q++) {
1787                       cols[q] = maps[grid].c_maps[idx][q].gid;
1788                     }
1789                   }
1790                   const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1791                   for (int q = 0, idx = idx0; q < nr; q++) {
1792                     for (int d = 0; d < nc; d++, idx++) {
1793                       oor[idx] = rows[q] + moffset;
1794                       ooc[idx] = cols[d] + moffset;
1795                     }
1796                   }
1797                 }
1798               }
1799             }
1800           } // cell
1801         } // grid
1802       } // batch
1803       PetscCall(MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc));
1804       PetscCall(PetscFree2(oor,ooc));
1805     }
1806     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1807     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1808     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1809     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container));
1810     PetscCall(PetscContainerDestroy(&container));
1811     PetscCall(PetscLogEventEnd(ctx->events[2],0,0,0,0));
1812   } // end GPU assembly
  { /* create static point data (the Jacobian is called first); only one copy is kept and shared by all batched vertices */
1814     PetscReal      *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a;
1815     PetscInt       outer_ipidx, outer_ej,grid, nip_glb = 0;
1816     PetscFE        fe;
1817     const PetscInt Nb = Nq;
1818     PetscCall(PetscLogEventBegin(ctx->events[7],0,0,0,0));
1819     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1820     for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid];
1821     /* collect f data, first time is for Jacobian, but make mass now */
1822     if (ctx->verbose > 0) {
1823       PetscInt ncells = 0, N;
1824       PetscCall(MatGetSize(ctx->J,&N,NULL));
1825       for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid];
1826       PetscCall(PetscPrintf(ctx->comm,"%" PetscInt_FMT ") %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N));
1827     }
1828     PetscCall(PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a));
1829     if (dim==3) {
1830       PetscCall(PetscMalloc1(nip_glb,&zz));
1831     }
1832     if (ctx->use_energy_tensor_trick) {
1833       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
1834       PetscCall(PetscObjectSetName((PetscObject) fe, "energy"));
1835     }
1836     /* init each grids static data - no batch */
1837     for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once)
1838       Vec             v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1839       PetscSection    e_section;
1840       DM              dmEnergy;
1841       PetscInt        cStart, cEnd, ej;
1842 
1843       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1844       // prep energy trick, get v^2 / 2 vector
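      // The 'energy tensor trick' projects the particle energy (v^2/2 classically, or a
      // (gamma-1)-based energy with relativistic corrections) onto the FE space and uses its
      // gradient in place of v in the Landau tensor so that the discretization conserves energy.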
1845       if (ctx->use_energy_tensor_trick) {
1846         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1847         Vec            glob_v2;
1848         PetscReal      *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1849 
1850         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1851         PetscCall(PetscObjectSetName((PetscObject) dmEnergy, "energy"));
1852         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1853         PetscCall(DMCreateDS(dmEnergy));
1854         PetscCall(DMGetSection(dmEnergy, &e_section));
1855         PetscCall(DMGetGlobalVector(dmEnergy,&glob_v2));
1856         PetscCall(PetscObjectSetName((PetscObject) glob_v2, "trick"));
1857         c2_0[0] = &data[0];
1858         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2));
1859         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1860         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1861         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1862         PetscCall(DMGlobalToLocalEnd  (dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1863         PetscCall(DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view"));
1864         PetscCall(VecViewFromOptions(glob_v2,NULL, "-energy_vec_view"));
1865         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1866       }
1867       /* append part of the IP data for each grid */
1868       for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
1869         PetscScalar *coefs = NULL;
1870         PetscReal    vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1871         invJe = invJ_a + outer_ej*Nq*dim*dim;
1872         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj));
1873         if (ctx->use_energy_tensor_trick) {
1874           PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1875         }
1876         /* create static point data */
1877         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1878           const PetscInt  gidx = outer_ipidx;
1879           const PetscReal *invJ = &invJe[qj*dim*dim];
          ww[gidx] = detJj[qj] * quadWeights[qj];
          if (dim==2) ww[gidx] *= vj[qj * dim + 0];  /* cylindrical coordinate r, w/o the 2 pi factor */
1882           // get xx, yy, zz
1883           if (ctx->use_energy_tensor_trick) {
1884             double                  refSpaceDer[3],eGradPhi[3];
1885             const PetscReal * const DD = Tf[0]->T[1];
1886             const PetscReal         *Dq = &DD[qj*Nb*dim];
1887             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1888             for (int b = 0; b < Nb; ++b) {
1889               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
1890             }
1891             xx[gidx] = 1e10;
1892             if (ctx->use_relativistic_corrections) {
1893               double dg2_c2 = 0;
1894               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1895               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1896               dg2_c2 *= (double)c02;
1897               if (dg2_c2 >= .999) {
1898                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1899                 yy[gidx] = vj[qj * dim + 1];
1900                 if (dim==3) zz[gidx] = vj[qj * dim + 2];
                PetscCall(PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + ((dim==3) ? zz[gidx]*zz[gidx] : 0)), ej, qj, dg2_c2, xx[gidx],yy[gidx],(dim==3) ? zz[gidx] : (PetscReal)0)); // guard zz: it is only allocated in 3D
1902               } else {
1903                 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
1904                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1905                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1906               }
1907             }
1908             if (xx[gidx] == 1e10) {
1909               for (int d = 0; d < dim; ++d) {
1910                 for (int e = 0 ; e < dim; ++e) {
1911                   eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e];
1912                 }
1913               }
1914               xx[gidx] = eGradPhi[0];
1915               yy[gidx] = eGradPhi[1];
1916               if (dim==3) zz[gidx] = eGradPhi[2];
1917             }
1918           } else {
1919             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1920             yy[gidx] = vj[qj * dim + 1];
1921             if (dim==3) zz[gidx] = vj[qj * dim + 2];
1922           }
1923         } /* q */
1924         if (ctx->use_energy_tensor_trick) {
1925           PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1926         }
1927       } /* ej */
1928       if (ctx->use_energy_tensor_trick) {
1929         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1930         PetscCall(DMDestroy(&dmEnergy));
1931       }
1932     } /* grid */
1933     if (ctx->use_energy_tensor_trick) {
1934       PetscCall(PetscFEDestroy(&fe));
1935     }
1936     /* cache static data */
1937     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1938 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1939       PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1940       for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1941         for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
1942           invMass[ii]  = ctx->m_0/ctx->masses[ii];
1943           nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii];
1944           nu_beta[ii]  = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
1945         }
1946       }
1947       if (ctx->deviceType == LANDAU_CUDA) {
1948 #if defined(PETSC_HAVE_CUDA)
1949         PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1950                                         nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1951 #else
1952         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built");
1953 #endif
1954       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1955 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1956         PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1957                                           nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d));
1958 #else
1959         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built");
1960 #endif
1961       }
1962 #endif
1963       /* free */
1964       PetscCall(PetscFree4(ww,xx,yy,invJ_a));
1965       if (dim==3) {
1966         PetscCall(PetscFree(zz));
1967       }
1968     } else { /* CPU version, just copy in, only use part */
1969       ctx->SData_d.w = (void*)ww;
1970       ctx->SData_d.x = (void*)xx;
1971       ctx->SData_d.y = (void*)yy;
1972       ctx->SData_d.z = (void*)zz;
1973       ctx->SData_d.invJ = (void*)invJ_a;
1974     }
1975     PetscCall(PetscLogEventEnd(ctx->events[7],0,0,0,0));
1976   } // initialize
1977   PetscFunctionReturn(0);
1978 }
1979 
1980 /* < v, u > */
1981 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1982                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1983                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1984                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1985 {
1986   g0[0] = 1.;
1987 }
1988 
1989 /* < v, u > */
1990 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1991                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1992                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1993                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1994 {
1995   static double ttt = 1;
1996   g0[0] = ttt++;
1997 }
1998 
1999 /* < v, u > */
2000 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2001                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2002                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2003                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2004 {
2005   g0[0] = 2.*PETSC_PI*x[0];
2006 }
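/*
  The g0 point functions above define the mass bilinear forms used below: with basis functions phi_i(v),
    g0_1 (3D):              M_ij = integral phi_i(v) phi_j(v) dv
    g0_r (2D axisymmetric): M_ij = integral 2*pi*r phi_i(r,z) phi_j(r,z) dr dz,   r = x[0]
  i.e. 2*pi*r is the axisymmetric volume element. g0_fake only feeds the throw-away matrix used for the RCM
  reordering in LandauCreateMatrix(); its incrementing value just guarantees the entries are nonzero (that
  matrix has MAT_IGNORE_ZERO_ENTRIES set).
*/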
2007 
2008 static PetscErrorCode MatrixNfDestroy(void *ptr)
2009 {
2010   PetscInt *nf = (PetscInt *)ptr;
2011   PetscFunctionBegin;
2012   PetscCall(PetscFree(nf));
2013   PetscFunctionReturn(0);
2014 }
2015 
2016 static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
2017 {
2018   PetscInt       *idxs=NULL;
2019   Mat            subM[LANDAU_MAX_GRIDS];
2020 
2021   PetscFunctionBegin;
2022   if (!ctx->gpu_assembly) { /* nothing to do here without GPU assembly */
2023     PetscFunctionReturn(0);
2024   }
2025   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
2026   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2027     PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs));
2028   }
2029   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2030     const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
2031     Mat             gMat;
2032     DM              massDM;
2033     PetscDS         prob;
2034     Vec             tvec;
2035     // get "mass" matrix for reordering
2036     PetscCall(DMClone(ctx->plex[grid], &massDM));
2037     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
2038     PetscCall(DMCreateDS(massDM));
2039     PetscCall(DMGetDS(massDM, &prob));
2040     for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2041       PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
2042     }
2043     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2044     PetscCall(DMSetFromOptions(massDM));
2045     PetscCall(DMCreateMatrix(massDM, &gMat));
2046     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2047     PetscCall(MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2048     PetscCall(MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2049     PetscCall(DMCreateLocalVector(ctx->plex[grid],&tvec));
2050     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
2051     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
2052     PetscCall(DMDestroy(&massDM));
2053     PetscCall(VecDestroy(&tvec));
2054     subM[grid] = gMat;
2055     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2056       MatOrderingType rtype = MATORDERINGRCM;
2057       IS              isrow,isicol;
2058       PetscCall(MatGetOrdering(gMat,rtype,&isrow,&isicol));
2059       PetscCall(ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]));
2060       PetscCall(ISGetIndices(isrow, &values));
2061       for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // build the field-major permutation indices for each batch of this grid
2062 #if !defined(LANDAU_SPECIES_MAJOR)
2063         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N;
2064         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2065 #else
2066         PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n;
2067         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
2068 #endif
2069       }
2070       PetscCall(ISRestoreIndices(isrow, &values));
2071       PetscCall(ISDestroy(&isrow));
2072       PetscCall(ISDestroy(&isicol));
2073     }
2074   }
2075   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2076     PetscCall(ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is));
2077   }
2078   // get a block matrix
2079   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2080     Mat               B = subM[grid];
2081     PetscInt          nloc, nzl, colbuf[1024], row;
2082     PetscCall(MatGetSize(B, &nloc, NULL));
2083     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2084       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2085       const PetscInt    *cols;
2086       const PetscScalar *vals;
2087       for (int i=0 ; i<nloc ; i++) {
2088         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2089         PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2090         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2091         row = i + moffset;
2092         PetscCall(MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2093         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2094       }
2095     }
2096   }
2097   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2098     PetscCall(MatDestroy(&subM[grid]));
2099   }
2100   PetscCall(MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY));
2101   PetscCall(MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY));
2102 
2103   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2104     Mat            mat_block_order;
2105     PetscCall(MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order)); // use MatPermute
2106     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_field_major_mat_view"));
2107     PetscCall(MatDestroy(&ctx->J));
2108     ctx->J = mat_block_order;
2109     // override ops to make KSP work in field major space
2110     ctx->seqaij_mult                  = mat_block_order->ops->mult;
2111     mat_block_order->ops->mult        = LandauMatMult;
2112     mat_block_order->ops->multadd     = LandauMatMultAdd;
2113     ctx->seqaij_solve                 = NULL;
2114     ctx->seqaij_getdiagonal           = mat_block_order->ops->getdiagonal;
2115     mat_block_order->ops->getdiagonal = LandauMatGetDiagonal;
2116     ctx->seqaij_multtranspose         = mat_block_order->ops->multtranspose;
2117     mat_block_order->ops->multtranspose = LandauMatMultTranspose;
2118     PetscCall(VecDuplicate(X,&ctx->work_vec));
2119     PetscCall(VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch));
2120   }
2121 
2122   PetscFunctionReturn(0);
2123 }
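/*
  Summary of what LandauCreateMatrix() sets up when gpu_assembly and jacobian_field_major_order are enabled
  (see -dm_landau_jacobian_field_major_order below): per (batch, grid) block an RCM ordering of that grid's
  reordering matrix is computed and concatenated into 'idxs'/ctx->batch_is, using the block offset visible at
  the top of the routine, n0 = mat_offset[grid] + b_id*mat_offset[num_grids] (default, non species-major
  layout). ctx->J is then permuted with MatCreateSubMatrix(J, batch_is, batch_is, ...) and its mult,
  multtranspose and getdiagonal ops are wrapped (LandauMatMult etc.) so the KSP operates in field-major
  ordering while callers keep passing vectors in the packed plex ordering via the ctx->plex_batch scatter.
*/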
2124 
2125 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2126 /*@C
2127  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2128 
2129  Collective on comm
2130 
2131  Input Parameters:
2132  +   comm  - The MPI communicator
2133  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2134  -   prefix - prefix for options (not tested)
2135 
2136  Output Parameters:
2137  +   pack - The DM object representing the mesh
2138  .   X - A vector (user destroys)
2139  -   J - Optional matrix (object destroys)
2140 
2141  Level: beginner
2142 
2143  .keywords: mesh
2144  .seealso: DMPlexCreate(), DMPlexLandauDestroyVelocitySpace()
2145  @*/
2146 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2147 {
2148   LandauCtx      *ctx;
2149   Vec            Xsub[LANDAU_MAX_GRIDS];
2150   IS             grid_batch_is_inv[LANDAU_MAX_GRIDS];
2151 
2152   PetscFunctionBegin;
2153   PetscCheckFalse(dim!=2 && dim!=3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2154   PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM);
2155   PetscCall(PetscNew(&ctx));
2156   ctx->comm = comm; /* used for diagnostics and global errors */
2157   /* process options */
2158   PetscCall(ProcessOptions(ctx,prefix));
2159   if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
2160   /* Create Mesh */
2161   PetscCall(DMCompositeCreate(PETSC_COMM_SELF,pack));
2162   PetscCall(PetscLogEventBegin(ctx->events[13],0,0,0,0));
2163   PetscCall(PetscLogEventBegin(ctx->events[15],0,0,0,0));
2164   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates the grids (DMForest when AMR is used)
2165   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2166     /* create FEM */
2167     PetscCall(SetupDS(ctx->plex[grid],dim,grid,ctx));
2168     /* set initial state */
2169     PetscCall(DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]));
2170     PetscCall(PetscObjectSetName((PetscObject) Xsub[grid], "u_orig"));
2171     /* initial static refinement, no solve */
2172     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx));
2173     /* forest refinement - forest goes in (if forest), plex comes out */
2174     if (ctx->use_p4est) {
2175       DM plex;
2176       PetscCall(adapt(grid,ctx,&Xsub[grid])); // forest goes in, plex comes out
2177       PetscCall(DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view")); // need to differentiate - todo
2178       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
2179       // convert to plex, all done with this level
2180       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
2181       PetscCall(DMDestroy(&ctx->plex[grid]));
2182       ctx->plex[grid] = plex;
2183     }
2184 #if !defined(LANDAU_SPECIES_MAJOR)
2185     PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2186 #else
2187     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2188       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2189     }
2190 #endif
2191     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
2192   }
2193 #if !defined(LANDAU_SPECIES_MAJOR)
2194   // stack the batched DMs, could do it all here!!! b_id=0
2195   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2196     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2197       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2198     }
2199   }
2200 #endif
2201   // create ctx->mat_offset
2202   ctx->mat_offset[0] = 0;
2203   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2204     PetscInt    n;
2205     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2206     ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
2207   }
2208   // create DM & Jacobian
2209   PetscCall(DMSetApplicationContext(*pack, ctx));
2210   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2211   PetscCall(DMSetFromOptions(*pack));
2212   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2213   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2214   PetscCall(MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2215   PetscCall(MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2216   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2217   // construct initial conditions in X
2218   PetscCall(DMCreateGlobalVector(*pack,X));
2219   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2220     PetscInt n;
2221     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2222     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2223       PetscScalar const *values;
2224       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2225       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx));
2226       PetscCall(VecGetArrayRead(Xsub[grid],&values));
2227       for (int i=0, idx = moffset; i<n; i++, idx++) {
2228         PetscCall(VecSetValue(*X,idx,values[i],INSERT_VALUES));
2229       }
2230       PetscCall(VecRestoreArrayRead(Xsub[grid],&values));
2231     }
2232   }
2233   // cleanup
2234   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2235     PetscCall(VecDestroy(&Xsub[grid]));
2236   }
2237   /* check for correct matrix type */
2238   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2239     PetscBool flg;
2240     if (ctx->deviceType == LANDAU_CUDA) {
2241       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,""));
2242       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2243     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2244       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,""));
2245 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2246       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2247 #else
2248       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2249 #endif
2250     }
2251   }
2252   PetscCall(PetscLogEventEnd(ctx->events[15],0,0,0,0));
2253   // create field major ordering
2254 
2255   ctx->work_vec   = NULL;
2256   ctx->plex_batch = NULL;
2257   ctx->batch_is   = NULL;
2258   for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
2259   PetscCall(PetscLogEventBegin(ctx->events[12],0,0,0,0));
2260   PetscCall(LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx));
2261   PetscCall(PetscLogEventEnd(ctx->events[12],0,0,0,0));
2262 
2263   // create AMR GPU assembly maps and static GPU data
2264   PetscCall(CreateStaticGPUData(dim,grid_batch_is_inv,ctx));
2265 
2266   PetscCall(PetscLogEventEnd(ctx->events[13],0,0,0,0));
2267 
2268   // create mass matrix
2269   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2270 
2271   if (J) *J = ctx->J;
2272 
2273   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2274     PetscContainer container;
2275     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2276     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2277     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2278     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container));
2279     PetscCall(PetscContainerDestroy(&container));
2280     // batch solvers need to map -- can batch solvers work
2281     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2282     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2283     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container));
2284     PetscCall(PetscContainerDestroy(&container));
2285   }
2286   // for batch solvers
2287   {
2288     PetscContainer  container;
2289     PetscInt        *pNf;
2290     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2291     PetscCall(PetscMalloc1(1, &pNf));
2292     *pNf = ctx->batch_sz;
2293     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2294     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2295     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container));
2296     PetscCall(PetscContainerDestroy(&container));
2297   }
2298 
2299   PetscFunctionReturn(0);
2300 }
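/*
  A minimal usage sketch for this routine (assumes the usual PetscInitialize/PetscFinalize wrapper and that
  'dim' matches the compile-time LANDAU_DIM); solver and grid options are left to the command line:

    DM  pack;
    Vec X;
    Mat J;
    TS  ts;
    PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack));
    PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
    PetscCall(TSSetDM(ts, pack));
    PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
    PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
    PetscCall(TSSetFromOptions(ts));
    PetscCall(TSSolve(ts, X));
    PetscCall(TSDestroy(&ts));
    PetscCall(VecDestroy(&X));                           // X is owned by the caller
    PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));  // destroys J and the mass matrix
*/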
2301 
2302 /*@
2303  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2304 
2305  Collective on dm
2306 
2307  Input/Output Parameter:
2308  .   dm - the dm to destroy
2309 
2310  Level: beginner
2311 
2312  .keywords: mesh
2313  .seealso: DMPlexLandauCreateVelocitySpace()
2314  @*/
2315 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2316 {
2317   LandauCtx      *ctx;
2318   PetscFunctionBegin;
2319   PetscCall(DMGetApplicationContext(*dm, &ctx));
2320   PetscCall(MatDestroy(&ctx->M));
2321   PetscCall(MatDestroy(&ctx->J));
2322   for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2323   PetscCall(ISDestroy(&ctx->batch_is));
2324   PetscCall(VecDestroy(&ctx->work_vec));
2325   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2326   if (ctx->deviceType == LANDAU_CUDA) {
2327 #if defined(PETSC_HAVE_CUDA)
2328     PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d));
2329 #else
2330     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
2331 #endif
2332   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2333 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2334     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2335 #else
2336     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
2337 #endif
2338   } else {
2339     if (ctx->SData_d.x) { /* in a CPU run */
2340       PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
2341       LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
2342       PetscCall(PetscFree4(ww,xx,yy,invJ));
2343       if (zz) {
2344         PetscCall(PetscFree(zz));
2345       }
2346       if (coo_elem_offsets) {
2347         PetscCall(PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets)); // could be NULL
2348       }
2349     }
2350   }
2351 
2352   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2353     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]));
2354     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz));
2355     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]));
2356     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]));
2357     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n",ctx->times[LANDAU_OPERATOR]));
2358     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n",ctx->times[LANDAU_MASS]));
2359     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n",ctx->times[LANDAU_F_DF]));
2360     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n",ctx->times[LANDAU_KERNEL]));
2361     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]));
2362     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]));
2363   }
2364   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2365     PetscCall(DMDestroy(&ctx->plex[grid]));
2366   }
2367   PetscCall(PetscFree(ctx));
2368   PetscCall(DMDestroy(dm));
2369   PetscFunctionReturn(0);
2370 }
2371 
2372 /* < v, ru > */
2373 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2374                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2375                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2376                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2377 {
2378   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2379   f0[0] = u[ii];
2380 }
2381 
2382 /* < v, ru > */
2383 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2384                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2385                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2386                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2387 {
2388   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2389   f0[0] = x[jj]*u[ii]; /* x momentum */
2390 }
2391 
2392 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2393                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2394                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2395                     PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2396 {
2397   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2398   double tmp1 = 0.;
2399   for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2400   f0[0] = tmp1*u[ii];
2401 }
2402 
2403 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2404 {
2405   const PetscReal *c2_0_arr = ((PetscReal*)actx);
2406   const PetscReal c02 = c2_0_arr[0];
2407 
2408   PetscFunctionBegin;
2409   for (int s = 0 ; s < Nf ; s++) {
2410     PetscReal tmp1 = 0.;
2411     for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2412 #if defined(PETSC_USE_DEBUG)
2413     u[s] = PetscSqrtReal(1. + tmp1/c02);//  u[0] = PetscSqrtReal(1. + xx);
2414 #else
2415     {
2416       PetscReal xx = tmp1/c02;
2417       u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.)
2418     }
2419 #endif
2420   }
2421   PetscFunctionReturn(0);
2422 }
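/*
  Note on gamma_n_f(): with xx = |v|^2/c_0^2 the two branches are related by the exact identity
      sqrt(1 + xx) - 1 = xx / (sqrt(1 + xx) + 1),
  so the non-debug branch evaluates gamma - 1 in the form that avoids cancellation for small xx, while the
  debug branch evaluates gamma itself; the two branches therefore differ by the constant 1.
*/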
2423 
2424 /* < v, ru > */
2425 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2426                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2427                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2428                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2429 {
2430   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2431   f0[0] = 2.*PETSC_PI*x[0]*u[ii];
2432 }
2433 
2434 /* < v, ru > */
2435 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2436                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2437                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2438                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2439 {
2440   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2441   f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
2442 }
2443 
2444 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2445                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2446                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2447                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2448 {
2449   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2450   f0[0] =  2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
2451 }
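/*
  The f0_s_r* functionals above are the axisymmetric (dim==2) moment densities integrated by
  DMPlexLandauPrintNorms(); with r = x[0], z = x[1] and f = u[ii]:
    f0_s_rden : 2*pi * integral r f dr dz               (density, scaled by n_0*q by the caller)
    f0_s_rmom : 2*pi * integral r z f dr dz             (z-momentum, scaled by n_0*v_0*m)
    f0_s_rv2  : 2*pi * integral r (r^2 + z^2) f dr dz   (scaled by n_0*v_0^2*m/2 to give the energy)
  The Cartesian (dim==3) counterparts f0_s_den/f0_s_mom/f0_s_v2 drop the 2*pi*r weight.
*/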
2452 
2453 /*@
2454  DMPlexLandauPrintNorms - Collect and print the density, momentum, and energy moments of the state
2455 
2456  Collective on dm
2457 
2458  Input Parameters:
2459  +   X  - the state
2460  -   stepi - current step to print
2461 
2462  Level: beginner
2463 
2464  .keywords: mesh
2465  .seealso: DMPlexLandauCreateVelocitySpace()
2466  @*/
2467 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2468 {
2469   LandauCtx      *ctx;
2470   PetscDS        prob;
2471   DM             pack;
2472   PetscInt       cStart, cEnd, dim, ii, i0, nDMs;
2473   PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
2474   PetscScalar    xmomentum[LANDAU_MAX_SPECIES],  ymomentum[LANDAU_MAX_SPECIES],  zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2475   Vec            *globXArray;
2476 
2477   PetscFunctionBegin;
2478   PetscCall(VecGetDM(X, &pack));
2479   PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2480   PetscCall(DMGetDimension(pack, &dim));
2481   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim);
2482   PetscCall(DMGetApplicationContext(pack, &ctx));
2483   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2484   /* print momentum and energy */
2485   PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
2486   PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz);
2487   PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
2488   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2489   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
2490     Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2491     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2492     for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
2493       PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2494       PetscCall(PetscDSSetConstants(prob, 2, user));
2495       if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
2496         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2497         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2498         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2499         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2500         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2501         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2502         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2503         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2504         energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2505         zmomentumtot += zmomentum[ii];
2506         energytot  += energy[ii];
2507         densitytot += density[ii];
2508         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii])));
2509       } else { /* 2/3X + 3V (Cartesian coordinates) */
2510         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2511         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2512         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2513         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2514         user[1] = 0;
2515         PetscCall(PetscDSSetConstants(prob, 2, user));
2516         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2517         xmomentum[ii]  = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2518         user[1] = 1;
2519         PetscCall(PetscDSSetConstants(prob, 2, user));
2520         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2521         ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2522         user[1] = 2;
2523         PetscCall(PetscDSSetConstants(prob, 2, user));
2524         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2525         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2526         if (ctx->use_relativistic_corrections) {
2527           /* gamma * M * f */
2528           if (ii==0 && grid==0) { // do all at once
2529             Vec            Mf, globGamma, *globMfArray, *globGammaArray;
2530             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2531             PetscReal      *c2_0[1], data[1];
2532 
2533             PetscCall(VecDuplicate(X,&globGamma));
2534             PetscCall(VecDuplicate(X,&Mf));
2535             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray));
2536             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray));
2537             /* M * f */
2538             PetscCall(MatMult(ctx->M,X,Mf));
2539             /* gamma */
2540             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2541             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching
2542               Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2543               data[0] = PetscSqr(C_0(ctx->v_0));
2544               c2_0[0] = &data[0];
2545               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1));
2546             }
2547             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2548             /* gamma * Mf */
2549             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2550             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2551             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice
2552               PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2553               Vec      Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
2554               // get each component
2555               PetscCall(VecGetSize(Mfsub,&N));
2556               PetscCall(VecCreate(ctx->comm,&v1));
2557               PetscCall(VecSetSizes(v1,PETSC_DECIDE,N/Nf));
2558               PetscCall(VecCreate(ctx->comm,&v2));
2559               PetscCall(VecSetSizes(v2,PETSC_DECIDE,N/Nf));
2560               PetscCall(VecSetFromOptions(v1)); // ???
2561               PetscCall(VecSetFromOptions(v2));
2562               // get each component
2563               PetscCall(VecGetBlockSize(Gsub,&bs));
2564               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf);
2565               PetscCall(VecGetBlockSize(Mfsub,&bs));
2566               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf);
2567               for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2568                 PetscScalar val;
2569                 PetscCall(VecStrideGather(Gsub,i,v1,INSERT_VALUES));
2570                 PetscCall(VecStrideGather(Mfsub,i,v2,INSERT_VALUES));
2571                 PetscCall(VecDot(v1,v2,&val));
2572                 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2573               }
2574               PetscCall(VecDestroy(&v1));
2575               PetscCall(VecDestroy(&v2));
2576             } /* grids */
2577             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2578             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2579             PetscCall(PetscFree(globGammaArray));
2580             PetscCall(PetscFree(globMfArray));
2581             PetscCall(VecDestroy(&globGamma));
2582             PetscCall(VecDestroy(&Mf));
2583           }
2584         } else {
2585           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2586           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2587           energy[ii]    = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2588         }
2589         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii])));
2590         xmomentumtot += xmomentum[ii];
2591         ymomentumtot += ymomentum[ii];
2592         zmomentumtot += zmomentum[ii];
2593         energytot    += energy[ii];
2594         densitytot   += density[ii];
2595       }
2596       if (ctx->num_species>1) PetscCall(PetscPrintf(ctx->comm, "\n"));
2597     }
2598   }
2599   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2600   PetscCall(PetscFree(globXArray));
2601   /* totals */
2602   PetscCall(DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd));
2603   if (ctx->num_species>1) {
2604     if (dim==2) {
2605       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2606     } else {
2607       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2608     }
2609   } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart));
2610   PetscCall(PetscPrintf(ctx->comm,"\n"));
2611   PetscFunctionReturn(0);
2612 }
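/*
  A small sketch of calling DMPlexLandauPrintNorms() from a TS monitor (the monitor function name is only
  for illustration):

    static PetscErrorCode Monitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
    {
      PetscFunctionBegin;
      PetscCall(DMPlexLandauPrintNorms(X, stepi));
      PetscFunctionReturn(0);
    }
    ...
    PetscCall(TSMonitorSet(ts, Monitor, NULL, NULL));
*/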
2613 
2614 /*@
2615  DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2616 
2617  Collective on pack
2618 
2619  Input Parameters:
2620 . pack     - the DM object
2621 
2622  Output Parameter:
2623 . Amat - The mass matrix (optional); the mass matrix is also stored in the Landau context attached to the DM
2624 
2625  Level: beginner
2626 
2627  .keywords: mesh
2628  .seealso: DMPlexLandauCreateVelocitySpace()
2629  @*/
2630 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2631 {
2632   DM             mass_pack,massDM[LANDAU_MAX_GRIDS];
2633   PetscDS        prob;
2634   PetscInt       ii,dim,N1=1,N2;
2635   LandauCtx      *ctx;
2636   Mat            packM,subM[LANDAU_MAX_GRIDS];
2637 
2638   PetscFunctionBegin;
2639   PetscValidHeaderSpecific(pack,DM_CLASSID,1);
2640   if (Amat) PetscValidPointer(Amat,2);
2641   PetscCall(DMGetApplicationContext(pack, &ctx));
2642   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2643   PetscCall(PetscLogEventBegin(ctx->events[14],0,0,0,0));
2644   PetscCall(DMGetDimension(pack, &dim));
2645   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack));
2646   /* create pack mass matrix */
2647   for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2648     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2649     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2650     PetscCall(DMCreateDS(massDM[grid]));
2651     PetscCall(DMGetDS(massDM[grid], &prob));
2652     for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2653       if (dim==3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2654       else        PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2655     }
2656 #if !defined(LANDAU_SPECIES_MAJOR)
2657     PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2658 #else
2659     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2660       PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2661     }
2662 #endif
2663     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2664   }
2665 #if !defined(LANDAU_SPECIES_MAJOR)
2666   // stack the batched DMs
2667   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2668     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2669       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2670     }
2671   }
2672 #endif
2673   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2674   PetscCall(DMSetFromOptions(mass_pack));
2675   PetscCall(DMCreateMatrix(mass_pack, &packM));
2676   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2677   PetscCall(MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2678   PetscCall(MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2679   PetscCall(DMDestroy(&mass_pack));
2680   /* make mass matrix for each block */
2681   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2682     Vec locX;
2683     DM  plex = massDM[grid];
2684     PetscCall(DMGetLocalVector(plex, &locX));
2685     /* Mass matrix is independent of the input, so no need to fill locX */
2686     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2687     PetscCall(DMRestoreLocalVector(plex, &locX));
2688     PetscCall(DMDestroy(&massDM[grid]));
2689   }
2690   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2691   PetscCall(MatGetSize(packM, &N2, NULL));
2692   PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2);
2693   /* assemble block diagonals */
2694   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2695     Mat               B = subM[grid];
2696     PetscInt          nloc, nzl, colbuf[1024], row;
2697     PetscCall(MatGetSize(B, &nloc, NULL));
2698     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2699       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2700       const PetscInt    *cols;
2701       const PetscScalar *vals;
2702       for (int i=0 ; i<nloc ; i++) {
2703         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2704         PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
2705         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2706         row = i + moffset;
2707         PetscCall(MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2708         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2709       }
2710     }
2711   }
2712   // cleanup
2713   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2714     PetscCall(MatDestroy(&subM[grid]));
2715   }
2716   PetscCall(MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY));
2717   PetscCall(MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY));
2718   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2719   PetscCall(MatViewFromOptions(packM,NULL,"-dm_landau_mass_view"));
2720   ctx->M = packM;
2721   if (Amat) *Amat = packM;
2722   PetscCall(PetscLogEventEnd(ctx->events[14],0,0,0,0));
2723   PetscFunctionReturn(0);
2724 }
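/*
  Structure of the matrix built above: each grid's species-block mass matrix subM[grid] (the g0_1 / g0_r
  bilinear form) is copied onto the diagonal of packM once per batch at the same LAND_MOFFSET block offsets
  used for ctx->J, so the mass matrix and the Jacobian share the packed layout and can be combined as
  shift*M + C(X) by the implicit time integrator (see DMPlexLandauIJacobian()).
*/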
2725 
2726 /*@
2727  DMPlexLandauIFunction - TS residual calculation
2728 
2729  Collective on ts
2730 
2731  Input Parameters:
2732 +   ts - The time stepping context
2733 .   time_dummy - current time (not used)
2734 .   X - Current state
2735 .   X_t - Time derivative of current state
2736 -   actx - Landau context
2737 
2738  Output Parameter:
2739 .   F  - The residual
2740 
2741  Level: beginner
2742 
2743  .keywords: mesh
2744  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIJacobian()
2745  @*/
2746 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2747 {
2748   LandauCtx      *ctx=(LandauCtx*)actx;
2749   PetscInt       dim;
2750   DM             pack;
2751 #if defined(PETSC_HAVE_THREADSAFETY)
2752   double         starttime, endtime;
2753 #endif
2754 
2755   PetscFunctionBegin;
2756   PetscCall(TSGetDM(ts,&pack));
2757   PetscCall(DMGetApplicationContext(pack, &ctx));
2758   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2759   if (ctx->stage) {
2760     PetscCall(PetscLogStagePush(ctx->stage));
2761   }
2762   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2763   PetscCall(PetscLogEventBegin(ctx->events[0],0,0,0,0));
2764 #if defined(PETSC_HAVE_THREADSAFETY)
2765   starttime = MPI_Wtime();
2766 #endif
2767   PetscCall(DMGetDimension(pack, &dim));
2768   if (!ctx->aux_bool) {
2769     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g X_t=%p %s\n",(double)time_dummy,(void*)X_t,ctx->aux_bool ? " -- seems to be in line search" : ""));
2770     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx));
2771     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2772     ctx->aux_bool = PETSC_TRUE;
2773   } else {
2774     PetscCall(PetscInfo(ts, "Skip forming Jacobian, has not changed (should check norm)\n"));
2775   }
2776   /* mat vec for op */
2777   PetscCall(MatMult(ctx->J,X,F)); /* C*f */
2778   /* add time term */
2779   if (X_t) {
2780     PetscCall(MatMultAdd(ctx->M,X_t,F,F));
2781   }
2782 #if defined(PETSC_HAVE_THREADSAFETY)
2783   if (ctx->stage) {
2784     endtime = MPI_Wtime();
2785     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2786     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2787     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2788   }
2789 #endif
2790   PetscCall(PetscLogEventEnd(ctx->events[0],0,0,0,0));
2791   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2792   if (ctx->stage) {
2793     PetscCall(PetscLogStagePop());
2794 #if defined(PETSC_HAVE_THREADSAFETY)
2795     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2796 #endif
2797   }
2798   PetscFunctionReturn(0);
2799 }
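/*
  In the implicit form used by TS, the residual computed above is
      F(t, X, X_t) = C(X) X + M X_t
  where C(X) is the collision operator assembled into ctx->J by LandauFormJacobian_Internal() with zero mass
  shift, and M is the mass matrix from DMPlexLandauCreateMassMatrix().
*/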
2800 
2801 /*@
2802  DMPlexLandauIJacobian - TS Jacobian construction
2803 
2804  Collective on ts
2805 
2806  Input Parameters:
2807 +   ts - The time stepping context
2808 .   time_dummy - current time (not used)
2809 .   X - Current state
2810 .   U_tdummy - Time derivative of current state (not used)
2811 .   shift - shift for du/dt term
2812 -   actx - Landau context
2813 
2814  Output Parameters:
2815 +   Amat  - Jacobian
2816 -   Pmat  - same as Amat
2817 
2818  Level: beginner
2819 
2820  .keywords: mesh
2821  .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIFunction()
2822  @*/
2823 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2824 {
2825   LandauCtx      *ctx=NULL;
2826   PetscInt       dim;
2827   DM             pack;
2828 #if defined(PETSC_HAVE_THREADSAFETY)
2829   double         starttime, endtime;
2830 #endif
2831   PetscFunctionBegin;
2832   PetscCall(TSGetDM(ts,&pack));
2833   PetscCall(DMGetApplicationContext(pack, &ctx));
2834   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2835   PetscCheckFalse(Amat!=Pmat || Amat!=ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2836   PetscCall(DMGetDimension(pack, &dim));
2837   /* get collision Jacobian into A */
2838   if (ctx->stage) {
2839     PetscCall(PetscLogStagePush(ctx->stage));
2840   }
2841   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2842   PetscCall(PetscLogEventBegin(ctx->events[9],0,0,0,0));
2843 #if defined(PETSC_HAVE_THREADSAFETY)
2844   starttime = MPI_Wtime();
2845 #endif
2846   PetscCall(PetscInfo(ts, "Adding just mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift));
2847   PetscCheckFalse(shift==0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift");
2848   PetscCheck(ctx->aux_bool,ctx->comm, PETSC_ERR_PLIB, "wrong state");
2849   if (!ctx->use_matrix_mass) {
2850     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx));
2851     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
2852   } else { /* add mass */
2853     PetscCall(MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN));
2854   }
2855   ctx->aux_bool = PETSC_FALSE;
2856 #if defined(PETSC_HAVE_THREADSAFETY)
2857   if (ctx->stage) {
2858     endtime = MPI_Wtime();
2859     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2860     ctx->times[LANDAU_MASS] += (endtime - starttime);
2861   }
2862 #endif
2863   PetscCall(PetscLogEventEnd(ctx->events[9],0,0,0,0));
2864   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2865   if (ctx->stage) {
2866     PetscCall(PetscLogStagePop());
2867 #if defined(PETSC_HAVE_THREADSAFETY)
2868     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2869 #endif
2870   }
2871   PetscFunctionReturn(0);
2872 }
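/*
  The Jacobian handed to TS above (Amat == Pmat == ctx->J) is
      dF/dX + shift * dF/dX_t  ~  C(X) + shift * M
  obtained either by re-forming the operator with the mass shift folded in (LandauFormJacobian_Internal with
  'shift') or, when ctx->use_matrix_mass is set, by adding shift*M to the already formed C(X) with MatAXPY;
  the C(X) formed during the residual evaluation is reused, i.e. its own state dependence is not re-linearized.
*/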
2873