#include <../src/mat/impls/aij/seq/aij.h>
#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
#include <petsclandau.h>              /*I "petsclandau.h"   I*/
#include <petscts.h>
#include <petscdmforest.h>
#include <petscdmcomposite.h>

/* Landau collision operator */

/* relativistic terms */
#if defined(PETSC_USE_REAL_SINGLE)
  #define SPEED_OF_LIGHT 2.99792458e8F
  #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
#else
  #define SPEED_OF_LIGHT 2.99792458e8
  #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
#endif
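/* Note: C_0(v0) = c / v0 is the speed of light in units of the normalizing velocity v0,
   i.e. the dimensionless c passed to the relativistic tensor kernels below */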

#include "land_tensors.h"

#if defined(PETSC_HAVE_OPENMP)
  #include <omp.h>
#endif

static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
{
  P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
  PetscFunctionBegin;
  // free device data
  if (maps[0].deviceType != LANDAU_CPU) {
#if defined(PETSC_HAVE_KOKKOS)
    if (maps[0].deviceType == LANDAU_KOKKOS) {
      PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // Kokkos does the device frees
    }
#endif
  }
  // free host data
  for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
    PetscCall(PetscFree(maps[grid].c_maps));
    PetscCall(PetscFree(maps[grid].gIdx));
  }
  PetscCall(PetscFree(maps));

  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal v2 = 0;
  PetscFunctionBegin;
  /* compute v^2 */
  for (int i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* return the kinetic energy v^2 / 2 */
  u[0] = v2 / 2;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* needs double */
static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal *c2_0_arr = ((PetscReal *)actx);
  double     u2 = 0, c02 = (double)*c2_0_arr, xx;

  PetscFunctionBegin;
  /* compute u^2 */
  for (int i = 0; i < dim; ++i) u2 += x[i] * x[i];
  /* gamma - 1 is better conditioned than gamma, and only derivatives of this are used */
  xx = u2 / c02;
#if defined(PETSC_USE_DEBUG)
  u[0] = PetscSqrtReal(1. + xx);
#else
  u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the constant -1 is irrelevant since only derivatives are used
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
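/* The non-debug branch uses the identity sqrt(1 + x) - 1 = x / (sqrt(1 + x) + 1) (rationalized
   numerator), which avoids the catastrophic cancellation in computing gamma - 1 directly
   when x = u^2/c^2 << 1 */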

/*
 LandauFormJacobian_Internal - Evaluates Jacobian matrix.

 Input Parameters:
 .  a_X - input vector
 .  dim - dimension
 .  shift - zero to compute the Jacobian, otherwise the mass matrix shift
 .  a_ctx - optional user-defined context

 Output Parameter:
 .  JacP - Jacobian matrix filled, not created
 */
static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
{
  LandauCtx         *ctx = (LandauCtx *)a_ctx;
  PetscInt           numCells[LANDAU_MAX_GRIDS], Nq, Nb;
  PetscQuadrature    quad;
  PetscReal          Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
  PetscScalar       *cellClosure = NULL;
  const PetscScalar *xdata       = NULL;
  PetscDS            prob;
  PetscContainer     container;
  P4estVertexMaps   *maps;
  Mat                subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];

  PetscFunctionBegin;
  PetscValidHeaderSpecific(a_X, VEC_CLASSID, 1);
  PetscValidHeaderSpecific(JacP, MAT_CLASSID, 2);
  PetscValidPointer(ctx, 5);
  /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
  PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
  PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0));
  PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
  PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
  if (container) {
    PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly");
    PetscCall(PetscContainerGetPointer(container, (void **)&maps));
    PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container");
    for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
  } else {
    PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly");
    for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)]));
    }
    maps = NULL;
  }
  // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
  PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
  PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
  PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
  PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
  PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
  // get metadata for collecting dynamic data
  for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
    PetscInt cStart, cEnd;
    PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
    PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
    numCells[grid] = cEnd - cStart; // grids can have different topology
  }
  PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0));
  if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
    DM pack;
    PetscCall(VecGetDM(a_X, &pack));
    PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
    PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0));
    for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
      Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
      if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                  /* add the 2pi term that is not in Landau */
    }
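    /* In 2D the (r,z) velocity coordinates are cylindrical; azimuthal symmetry contributes the
       factor of 2*pi here (the same factor multiplies the mass term below) */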
    if (!ctx->gpu_assembly) {
      Vec         *locXArray, *globXArray;
      PetscScalar *cellClosure_it;
      PetscInt     cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
      PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
        PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
        PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
        PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
      }
      /* count cellClosure size */
      PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
      PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure));
      cellClosure_it = cellClosure;
      PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray));
      PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
      PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
      for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          Vec      locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
          PetscInt cStart, cEnd, ei;
          PetscCall(VecDuplicate(locX, &locX2));
          PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
          for (ei = cStart; ei < cEnd; ++ei) {
            PetscScalar *coef = NULL;
            PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
            PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            cellClosure_it += Nb * Nf[grid];
          }
          PetscCall(VecDestroy(&locX2));
        }
      }
      PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, (PetscCount)(cellClosure_it - cellClosure),
                 cellClosure_sz * ctx->batch_sz);
      PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
      PetscCall(PetscFree(locXArray));
      PetscCall(PetscFree(globXArray));
      xdata = NULL;
    } else {
      PetscMemType mtype;
      if (ctx->jacobian_field_major_order) { // get data in batch ordering
        PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
        PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
        PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype));
      } else {
        PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype));
      }
      PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij");
      cellClosure = NULL;
    }
    PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0));
  } else xdata = cellClosure = NULL;

  /* do it */
  if (ctx->deviceType == LANDAU_KOKKOS) {
#if defined(PETSC_HAVE_KOKKOS)
    PetscCall(LandauKokkosJacobian(ctx->plex, Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
#else
    SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
#endif
  } else {               /* CPU version */
    PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
    PetscInt         ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
    PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
    PetscReal       *nu_alpha = (PetscReal *)ctx->SData_d.alpha, *nu_beta = (PetscReal *)ctx->SData_d.beta, *invMass = (PetscReal *)ctx->SData_d.invMass;
    PetscReal(*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal(*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
    PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
    PetscScalar *coo_vals = NULL;
    for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
      PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
      PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
      PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
    }
    /* count IPf size, etc */
    PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
    const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
    ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
    for (PetscInt grid = 0; grid < num_grids; grid++) {
      PetscInt nfloc        = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
      elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
      ip_offset[grid + 1]   = ip_offset[grid] + numCells[grid] * Nq;
      ipf_offset[grid + 1]  = ipf_offset[grid] + Nq * nfloc * numCells[grid];
    }
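    // Flattened layouts used below, per batch member: elem_offset[g] is the first element of grid g,
    // ip_offset[g] the first quadrature point (numCells[g]*Nq per grid), and ipf_offset[g] the first
    // (point,species) entry (Nq*Nf[g]*numCells[g] per grid), so ipf_offset[num_grids] sizes the f/df arrays.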
    IPf_sz_glb = ipf_offset[num_grids];
    IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
    // prep COO
    {
      PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time?
      PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n", (PetscInt)ctx->SData_d.coo_size));
    }
    if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0));
      PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim == 3 ? IPf_sz_tot : 0, &dudz));
      // F df/dx
      for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) {                        // for each element
        const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
        // find my grid:
        PetscInt grid = 0;
        while (b_elem_idx >= elem_offset[grid + 1]) grid++; // linear search for the grid (yuck)
        {
          const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
          const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
          PetscScalar   *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQND];
          PetscReal     *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data on batch 0
          PetscInt       b, f, q;
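          // Gather element coefficients: with GPU-style assembly maps, Idxs[b] >= 0 indexes the raw
          // vector data directly, while a negative value encodes a constrained (hanging) point as
          // -(idx+1) into c_maps[], whose (gid, scale) pairs interpolate from the owning points.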
          if (cellClosure) {
            coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
          } else {
            coef = coef_buff;
            for (f = 0; f < loc_Nf; ++f) {
              LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
              for (b = 0; b < Nb; ++b) {
                PetscInt idx = Idxs[b];
                if (idx >= 0) {
                  coef[f * Nb + b] = xdata[idx + moffset];
                } else {
                  idx              = -idx - 1;
                  coef[f * Nb + b] = 0;
                  for (q = 0; q < maps[grid].num_face; q++) {
                    PetscInt    id    = maps[grid].c_maps[idx][q].gid;
                    PetscScalar scale = maps[grid].c_maps[idx][q].scale;
                    coef[f * Nb + b] += scale * xdata[id + moffset];
                  }
                }
              }
            }
          }
          /* get f and df */
          for (PetscInt qi = 0; qi < Nq; qi++) {
            const PetscReal *invJ = &invJe[qi * dim * dim];
            const PetscReal *Bq   = &BB[qi * Nb];
            const PetscReal *Dq   = &DD[qi * Nb * dim];
            PetscReal        u_x[LANDAU_DIM];
            /* get f & df */
            for (f = 0; f < loc_Nf; ++f) {
              const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
              PetscInt       b, e;
              PetscReal      refSpaceDer[LANDAU_DIM];
              ff[idx] = 0.0;
              for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
              for (b = 0; b < Nb; ++b) {
                const PetscInt cidx = b;
                ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
                for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
              }
              for (int d = 0; d < LANDAU_DIM; ++d) {
                for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
              }
              dudx[idx] = u_x[0];
              dudy[idx] = u_x[1];
#if LANDAU_DIM == 3
              dudz[idx] = u_x[2];
#endif
            }
          } // q
        }   // grid
      }     // grid*batch
      PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0));
#if defined(PETSC_HAVE_THREADSAFETY)
      endtime = MPI_Wtime();
      if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
#endif
    } // Jacobian setup
    // assemble Jacobian (or mass)
    for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
      const PetscInt b_Nelem      = elem_offset[num_grids];
      const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
      PetscInt       grid = 0;
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
      {
        const PetscInt   loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
        const PetscInt   moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
        PetscScalar     *elemMat;
        const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
        PetscCall(PetscMalloc1(elemMatSize, &elemMat));
        PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat)));
        if (shift == 0.0) { // Jacobian
          PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0));
        } else { // mass
          PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0));
        }
        for (PetscInt qj = 0; qj < Nq; ++qj) {
          const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
          PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
          PetscInt       d, d2, dp, d3, IPf_idx;
          if (shift == 0.0) { // Jacobian
            const PetscReal *const invJj = &invJe[qj * dim * dim];
            PetscReal              gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
            const PetscReal        vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
            // create g2 & g3
            for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
              gg2_temp[d] = 0;
              for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
            }
            /* inner beta reduction */
            IPf_idx = 0;
            for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
              PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
              for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
                for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
                  const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
                  PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
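                  // the Landau tensor is singular at v' == v_j: `mask` zeroes the self-interaction
                  // contribution when the field point coincides with v_j to machine precision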
#if LANDAU_DIM == 2
                  PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
                  PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  if (ctx->use_relativistic_corrections) {
                    LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
                  } else {
                    LandauTensor3D(vj, x, y, z, U, mask);
                  }
#endif
                  for (int f = 0; f < Nfloc_r; ++f) {
                    const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
                    temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
                    temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
#if LANDAU_DIM == 3
                    temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
#endif
                    temp2 += ff[idx] * nu_beta[f + f_off] * (*lambdas)[grid][grid_r];
                  }
                  temp1[0] *= wi;
                  temp1[1] *= wi;
#if LANDAU_DIM == 3
                  temp1[2] *= wi;
#endif
                  temp2 *= wi;
#if LANDAU_DIM == 2
                  for (d2 = 0; d2 < 2; d2++) {
                    for (d3 = 0; d3 < 2; ++d3) {
                      /* K = U * grad(f): g2=e: i,A */
                      gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
                      /* D = -U * (I \kron (fx)): g3=f: i,j,A */
                      gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
                    }
                  }
#else
                  for (d2 = 0; d2 < 3; ++d2) {
                    for (d3 = 0; d3 < 3; ++d3) {
                      /* K = U * grad(f): g2 = e: i,A */
                      gg2_temp[d2] += U[d2][d3] * temp1[d3];
                      /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
                      gg3_temp[d2][d3] += U[d2][d3] * temp2;
                    }
                  }
#endif
                } // qi
              }   // ei_r
              IPf_idx += nip_loc_r * Nfloc_r;
            } /* grid_r - IPs */
            PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb);
            // add alpha and put in gg2/3
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
              for (d2 = 0; d2 < LANDAU_DIM; d2++) {
                gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
                for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off];
              }
            }
            /* add electric field term once per IP */
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off];
            /* Jacobian transform - g2, g3 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              for (d = 0; d < dim; ++d) {
                g2[fieldA][d] = 0.0;
                for (d2 = 0; d2 < dim; ++d2) {
                  g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
                  g3[fieldA][d][d2] = 0.0;
                  for (d3 = 0; d3 < dim; ++d3) {
                    for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp];
                  }
                  g3[fieldA][d][d2] *= wj;
                }
                g2[fieldA][d] *= wj;
              }
            }
          } else { // mass
            PetscReal wj = ww[jpidx_glb];
            /* Jacobian transform - g0 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              if (dim == 2) {
                g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
              } else {
                g0[fieldA] = wj * shift; // move this to below and remove g0
              }
            }
          }
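          // weak form assembled below: Jacobian  elemMat(i,j) += grad(phi_i) . (g2 phi_j + g3 grad(phi_j));
          // mass                       elemMat(i,j) += phi_i g0 phi_j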
          /* FE matrix construction */
          {
            PetscInt         fieldA, d, f, d2, g;
            const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
            /* assemble - on the diagonal (I,I) */
            for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
              for (f = 0; f < Nb; f++) {
                const PetscInt i = fieldA * Nb + f; /* Element matrix row */
                for (g = 0; g < Nb; ++g) {
                  const PetscInt j    = fieldA * Nb + g; /* Element matrix column */
                  const PetscInt fOff = i * totDim + j;
                  if (shift == 0.0) {
                    for (d = 0; d < dim; ++d) {
                      elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
                      for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2];
                    }
                  } else { // mass
                    elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
                  }
                }
              }
            }
          }
        }                   /* qj loop */
        if (shift == 0.0) { // Jacobian
          PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0));
        } else {
          PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0));
        }
#if defined(PETSC_HAVE_THREADSAFETY)
        endtime = MPI_Wtime();
        if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
#endif
        /* assemble matrix */
        if (!container) {
          PetscInt cStart;
          PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
          PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES));
          PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0));
        } else { // GPU like assembly for debugging
          PetscInt    fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
          PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
          LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets;
          /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
          for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
            LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
            for (f = 0; f < Nb; f++) {
              PetscInt idx = Idxs[f];
              if (idx >= 0) {
                nr           = 1;
                rows0[0]     = idx;
                row_scale[0] = 1.;
              } else {
                idx = -idx - 1;
                for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
                  if (maps[grid].c_maps[idx][q].gid < 0) break;
                  rows0[q]     = maps[grid].c_maps[idx][q].gid;
                  row_scale[q] = maps[grid].c_maps[idx][q].scale;
                }
              }
              for (g = 0; g < Nb; ++g) {
                idx = Idxs[g];
                if (idx >= 0) {
                  nc           = 1;
                  cols0[0]     = idx;
                  col_scale[0] = 1.;
                } else {
                  idx = -idx - 1;
                  for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
                    if (maps[grid].c_maps[idx][q].gid < 0) break;
                    cols0[q]     = maps[grid].c_maps[idx][q].gid;
                    col_scale[q] = maps[grid].c_maps[idx][q].scale;
                  }
                }
                const PetscInt    i   = fieldA * Nb + f; /* Element matrix row */
                const PetscInt    j   = fieldA * Nb + g; /* Element matrix column */
                const PetscScalar Aij = elemMat[i * totDim + j];
                if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
                  const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
                  const int idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
                  for (int q = 0, idx2 = idx0; q < nr; q++) {
                    for (int d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij;
                  }
                } else {
                  for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
                  for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
                  for (q = 0; q < nr; q++) {
                    for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij;
                  }
                  PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES));
                }
              }
            }
          }
        }
        if (loc_elem == -1) {
          PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n"));
          for (int d = 0; d < totDim; ++d) {
            for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f])));
            PetscCall(PetscPrintf(ctx->comm, "\n"));
          }
          exit(12);
        }
        PetscCall(PetscFree(elemMat));
      }                 /* grid */
    }                   /* outer element & batch loop */
    if (shift == 0.0) { // Jacobian: free the dynamic f/df data
      PetscCall(PetscFree4(ff, dudx, dudy, dudz));
    }
    if (!container) {                                         // 'CPU' assembly move nest matrix to global JacP
      for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
          PetscInt           nloc, nzl, colbuf[1024], row;
          const PetscInt    *cols;
          const PetscScalar *vals;
          Mat                B = subJ[LAND_PACK_IDX(b_id, grid)];
          PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatGetSize(B, &nloc, NULL));
          for (int i = 0; i < nloc; i++) {
            PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
            PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
            for (int j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
            row = moffset + i;
            PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
            PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
          }
          PetscCall(MatDestroy(&B));
        }
      }
    }
    if (coo_vals) {
      PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES));
      PetscCall(PetscFree(coo_vals));
    }
  } /* CPU version */
  PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
  /* clean up */
  if (cellClosure) PetscCall(PetscFree(cellClosure));
  if (xdata) {
    if (ctx->jacobian_field_major_order) PetscCall(VecRestoreArrayReadAndMemType(ctx->work_vec, &xdata)); // xdata was taken from work_vec above
    else PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
{
  PetscReal r = abc[0], z = abc[1];

  PetscFunctionBegin;
  xyz[0] = r;
  xyz[1] = z;
  if (dim == 3) xyz[2] = abc[2];

  PetscFunctionReturn(PETSC_SUCCESS);
}

/* create DMComposite of meshes for each species group */
static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
{
  PetscFunctionBegin;
  { /* p4est, quads */
    /* Create plex mesh of Landau domain */
    for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
      PetscReal par_radius = ctx->radius_par[grid], perp_radius = ctx->radius_perp[grid];
      if (!ctx->sphere && !ctx->simplex) { // 2 or 3D (only 3D option)
        PetscReal      lo[] = {-perp_radius, -par_radius, -par_radius}, hi[] = {perp_radius, par_radius, par_radius};
        DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
        if (dim == 2) lo[0] = 0;
        else {
          lo[1] = -perp_radius;
          hi[1] = perp_radius; // 3D y is a perp
        }
        PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
        PetscCall(DMLocalizeCoordinates(ctx->plex[grid]));                                                                           /* needed for periodic */
        if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube"));
        else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane"));
      } else if (dim == 2) {
        size_t len;
        PetscCall(PetscStrlen(ctx->filename, &len));
        if (len) {
          Vec          coords;
          PetscScalar *x;
          PetscInt     N;
          char         str[] = "-dm_landau_view_file_0";
          str[21] += grid;
          PetscCall(DMPlexCreateFromFile(comm_self, ctx->filename, "plexland.c", PETSC_TRUE, &ctx->plex[grid]));
          PetscCall(DMPlexOrient(ctx->plex[grid]));
          PetscCall(DMGetCoordinatesLocal(ctx->plex[grid], &coords));
          PetscCall(VecGetSize(coords, &N));
          PetscCall(VecGetArray(coords, &x));
          /* scale by domain size */
          for (PetscInt i = 0; i < N; i += 2) {
            x[i + 0] *= ctx->radius_perp[grid];
            x[i + 1] *= ctx->radius_par[grid];
          }
          PetscCall(VecRestoreArray(coords, &x));
          PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], ctx->filename));
          PetscCall(PetscInfo(ctx->plex[grid], "%d) Read %s mesh file (%s)", (int)grid, ctx->filename, str));
          PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, str));
        } else {
          PetscInt       numCells = ctx->simplex ? 12 : 6, cell_size = ctx->simplex ? 3 : 4, j;
          const PetscInt numVerts    = 11;
          PetscInt       cellsT[][4] = {
            {0,  1, 6, 5 },
            {1,  2, 7, 6 },
            {2,  3, 8, 7 },
            {3,  4, 9, 8 },
            {5,  6, 7, 10},
            {10, 7, 8, 9 }
          };
          PetscInt cellsS[][3] = {
            {0,  1, 6 },
            {1,  2, 6 },
            {6,  2, 7 },
            {7,  2, 8 },
            {8,  2, 3 },
            {8,  3, 4 },
            {0,  6, 5 },
            {5,  6, 7 },
            {5,  7, 10},
            {10, 7, 9 },
            {9,  7, 8 },
            {9,  8, 4 }
          };
          const PetscInt *pcell = (const PetscInt *)(ctx->simplex ? &cellsS[0][0] : &cellsT[0][0]);
          PetscReal       coords[11][2], *flatCoords = (PetscReal *)&coords[0][0];
          PetscReal       rad = ctx->radius[grid];
          for (j = 0; j < 5; j++) { // outside edge
            PetscReal z, r, theta = -PETSC_PI / 2 + j * PETSC_PI / 4;
            r            = rad * PetscCosReal(theta);
            coords[j][0] = r;
            z            = rad * PetscSinReal(theta);
            coords[j][1] = z;
          }
          coords[j][0]   = 0;
          coords[j++][1] = -rad * ctx->sphere_inner_radius_90degree;
          coords[j][0]   = rad * ctx->sphere_inner_radius_45degree;
          coords[j++][1] = -rad * ctx->sphere_inner_radius_45degree;
          coords[j][0]   = rad * ctx->sphere_inner_radius_90degree;
          coords[j++][1] = 0;
          coords[j][0]   = rad * ctx->sphere_inner_radius_45degree;
          coords[j++][1] = rad * ctx->sphere_inner_radius_45degree;
          coords[j][0]   = 0;
          coords[j++][1] = rad * ctx->sphere_inner_radius_90degree;
          coords[j][0]   = 0;
          coords[j++][1] = 0;
          PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, cell_size, ctx->interpolate, pcell, 2, flatCoords, &ctx->plex[grid]));
          PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle"));
          PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make circle %s mesh", grid, ctx->simplex ? "simplex" : "tensor"));
        }
      } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support 3V cubed sphere or simplex");
      PetscCall(DMSetFromOptions(ctx->plex[grid]));
    } // grid loop
    PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack, prefix));
    { /* convert to p4est (or whatever), wait for discretization to create pack */
      char      convType[256];
      PetscBool flg;

      PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
      PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg));
      PetscOptionsEnd();
      if (flg) {
        ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          DM dmforest;
          PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest));
          if (dmforest) {
            PetscBool isForest;
            PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest, prefix));
            PetscCall(DMIsForest(dmforest, &isForest));
            if (isForest) {
              if (ctx->sphere) PetscCall(DMForestSetBaseCoordinateMapping(dmforest, GeometryDMLandau, ctx));
              PetscCall(DMDestroy(&ctx->plex[grid]));
              ctx->plex[grid] = dmforest; // Forest for adaptivity
            } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non-Forest?");
          } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
        }
      } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
    }
  } /* non-file */
  PetscCall(DMSetDimension(pack, dim));
  PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh"));
  PetscCall(DMSetApplicationContext(pack, ctx));

  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
{
  PetscInt     ii, i0;
  char         buf[256];
  PetscSection section;

  PetscFunctionBegin;
  for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
    if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
    else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
    /* Setup Discretization - FEM */
    PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, NULL, PETSC_DECIDE, &ctx->fe[ii]));
    PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf));
    PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii]));
  }
  PetscCall(DMCreateDS(ctx->plex[grid]));
  PetscCall(DMGetSection(ctx->plex[grid], &section));
  for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
    if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
    else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
    PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Define a Maxwellian function for testing out the operator. */

/* Using cartesian velocity space coordinates, the particle */
/* density, [1/m^3], is defined according to */

/* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

/* Using some constant, c, we normalize the velocity vector into a */
/* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

/* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

/* Defining $\theta=2T/mc^2$, we thus find that the probability density */
/* for finding the particle within the interval in a box dx^3 around x is */

/* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */

typedef struct {
  PetscReal v_0;
  PetscReal kT_m;
  PetscReal n;
  PetscReal shift;
} MaxwellianCtx;

static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
  PetscInt       i;
  PetscReal      v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0), shift; /* theta = 2kT/mc^2 */
  PetscFunctionBegin;
  /* compute the exponent, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian; a negative shift means only the shifted Maxwellian is added, into the existing u[0] */
  if (mctx->shift < 0) shift = -mctx->shift;
  else {
    u[0]  = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
    shift = mctx->shift;
  }
  if (shift != 0.) {
    v2 = 0;
    for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
    v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
    /* evaluate the shifted Maxwellian */
    u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
 DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state

 Collective

 Input Parameters:
+   dm - The mesh (local)
.   time - Current time
.   temps - Temperatures of each species (global)
.   ns - Number density of each species (global)
.   grid - index into current grid - just used for offset into temps and ns
.   b_id - batch index
.   n_batch - number of batches
-   actx - Landau context

 Output Parameter:
.   X  - The state (local to this grid)

 Level: beginner

 .keywords: mesh
.seealso: `DMPlexLandauCreateVelocitySpace()`
 @*/
PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
{
  LandauCtx *ctx = (LandauCtx *)actx;
  PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
  PetscInt       dim;
  MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
  for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
    mctxs[i0]      = &data[i0];
    data[i0].v_0   = ctx->v_0;                             // v_0 same for all grids
    data[i0].kT_m  = ctx->k * temps[ii] / ctx->masses[ii]; /* kT/m */
    data[i0].n     = ns[ii];
    initu[i0]      = maxwellian;
    data[i0].shift = 0;
  }
  data[0].shift = ctx->electronShift;
  /* need to make ADD_ALL_VALUES work - TODO */
  PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
 LandauSetInitialCondition - Adds Maxwellians with context

 Collective

 Input Parameters:
+   dm - The mesh
.   grid - index into current grid - just used for offset into temps and ns
.   b_id - batch index
.   n_batch - number of batches
-   actx - Landau context with T and n

 Output Parameter:
.   X  - The state

 Level: beginner

 .keywords: mesh
.seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
 */
static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
{
  LandauCtx *ctx = (LandauCtx *)actx;
  PetscFunctionBegin;
  if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
  PetscCall(VecZeroEntries(X));
  PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
  PetscFunctionReturn(PETSC_SUCCESS);
}

// adapt a level once. Forest in/out
#if defined(PETSC_USE_INFO)
static const char *s_refine_names[] = {"RE", "Z1", "Origin", "Z2", "Uniform"};
#endif
static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
{
  DM              forest, plex, adaptedDM = NULL;
  PetscDS         prob;
  PetscBool       isForest;
  PetscQuadrature quad;
  PetscInt        Nq, Nb, *Nb2, cStart, cEnd, c, dim, qj, k;
  DMLabel         adaptLabel = NULL;

  PetscFunctionBegin;
  forest = ctx->plex[grid];
  PetscCall(DMCreateDS(forest));
  PetscCall(DMGetDS(forest, &prob));
  PetscCall(DMGetDimension(forest, &dim));
  PetscCall(DMIsForest(forest, &isForest));
  PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest");
  PetscCall(DMConvert(forest, DMPLEX, &plex));
  PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel));
  PetscCall(PetscFEGetQuadrature(fem, &quad));
  PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
  PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
  PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
  PetscCall(PetscDSGetDimensions(prob, &Nb2));
  PetscCheck(Nb2[0] == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, " Nb2[0] = %d != Nb (%" PetscInt_FMT ")", (int)Nb2[0], Nb);
  PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
  PetscCall(PetscInfo(sol, "%" PetscInt_FMT ") Refine phase: %s\n", grid, s_refine_names[type]));
  if (type == 4) {
    for (c = cStart; c < cEnd; c++) PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
  } else if (type == 2) {
    PetscInt  rCellIdx[8], nr = 0, nrmax = (dim == 3) ? 8 : 2;
    PetscReal minRad = PETSC_INFINITY, r;
    for (c = cStart; c < cEnd; c++) {
      PetscReal tt, v0[LANDAU_MAX_NQND * 3], detJ[LANDAU_MAX_NQND];
      PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
      for (qj = 0; qj < Nq; ++qj) {
        tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr(((dim == 3) ? v0[dim * qj + 2] : 0));
        r  = PetscSqrtReal(tt);
        if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
          minRad         = r;
          nr             = 0;
          rCellIdx[nr++] = c;
          PetscCall(PetscInfo(sol, "\t\t%" PetscInt_FMT ") Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", grid, (double)r, c, qj + 1, Nq));
        } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
          for (k = 0; k < nr; k++)
            if (c == rCellIdx[k]) break;
          if (k == nr) {
            rCellIdx[nr++] = c;
            PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", grid, (double)r, c, qj + 1, Nq, (double)(r - minRad)));
          }
        }
      }
    }
    for (k = 0; k < nr; k++) PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
    PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", grid, nr, rCellIdx[0], rCellIdx[1], (double)minRad));
  } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
    PetscScalar *coef = NULL;
    Vec          coords;
    PetscInt     csize, Nv, d, nz, nrefined = 0;
    DM           cdm;
    PetscSection cs;
    PetscCall(DMGetCoordinatesLocal(forest, &coords));
    PetscCall(DMGetCoordinateDM(forest, &cdm));
    PetscCall(DMGetLocalSection(cdm, &cs));
    for (c = cStart; c < cEnd; c++) {
      PetscInt doit = 0, outside = 0;
      PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
      Nv = csize / dim;
      for (nz = d = 0; d < Nv; d++) {
        PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
        x = PetscSqrtReal(x);
        if (type == 0) {
          if (ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
        } else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) {
          outside++; /* don't refine outside electron refine radius */
          PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
        } else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) {
          outside++; /* refine r=0 cells on refinement front */
          PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
        }
        if (x < PETSC_MACHINE_EPSILON * 10. && (type != 0 || ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON)) nz++;
      }
      PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
      if (doit || (outside < Nv && nz)) {
        PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
        nrefined++;
      }
    }
    PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " cells\n", grid, nrefined));
  }
  PetscCall(DMDestroy(&plex));
  PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
  PetscCall(DMLabelDestroy(&adaptLabel));
  *newForest = adaptedDM;
  if (adaptedDM) {
    if (isForest) {
      PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // ????
    }
    PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
    PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
    PetscCall(PetscInfo(sol, "\t\t\t\t%" PetscInt_FMT ") %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", grid, cEnd - cStart, Nq * (cEnd - cStart)));
    PetscCall(DMDestroy(&plex));
  } else *newForest = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

// forest goes in (ctx->plex[grid]), plex comes out
static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
{
  PetscInt adaptIter;

  PetscFunctionBegin;
  PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
  for (type = 0; type < 5; type++) {
    for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
      DM newForest = NULL;
      PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
      if (newForest) {
        PetscCall(DMDestroy(&ctx->plex[grid]));
        PetscCall(VecDestroy(uu));
        PetscCall(DMCreateGlobalVector(newForest, uu));
        PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR"));
        PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
        ctx->plex[grid] = newForest;
      } else {
        PetscCall(PetscInfo(*uu, "No refinement\n"));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

// make log(Lambdas) from NRL Plasma formulary
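/* For reference, the formulary expressions implemented below (T in eV):
     lambda_ee  = 23.5 - ln(sqrt(n_e) T_e^(-5/4)) - sqrt(1e-5 + (ln(T_e) - 2)^2 / 16)
     lambda_ei  = 23 - ln(sqrt(n_e) Z T_e^(-3/2))   if 10 Z^2 > T_e, else 24 - ln(sqrt(n_e) / T_e)
     lambda_ii' = 23 - ln( Z Z' (mu + mu') / (mu T_i' + mu' T_i) * sqrt(n_i Z^2 / T_i + n_i' Z'^2 / T_i') )
   where mu is the mass in proton units */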
static PetscErrorCode makeLambdas(LandauCtx *ctx)
{
  PetscFunctionBegin;
  for (PetscInt gridi = 0; gridi < ctx->num_grids; gridi++) {
    int       iii   = ctx->species_offset[gridi];
    PetscReal Ti_ev = (ctx->thermal_temps[iii] / 1.1604525e7) * 1000; // convert (back) to eV
    PetscReal ni    = ctx->n[iii] * ctx->n_0;
    for (PetscInt gridj = gridi; gridj < ctx->num_grids; gridj++) {
      PetscInt  jjj = ctx->species_offset[gridj];
      PetscReal Zj  = ctx->charges[jjj] / 1.6022e-19;
      if (gridi == 0) {
        if (gridj == 0) { // lam_ee
          ctx->lambdas[gridi][gridj] = 23.5 - PetscLogReal(PetscSqrtReal(ni) * PetscPowReal(Ti_ev, -1.25)) - PetscSqrtReal(1e-5 + PetscSqr(PetscLogReal(Ti_ev) - 2) / 16);
        } else { // lam_ei == lam_ie
          if (10 * Zj * Zj > Ti_ev) {
            ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(PetscSqrtReal(ni) * Zj * PetscPowReal(Ti_ev, -1.5));
          } else {
            ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 24 - PetscLogReal(PetscSqrtReal(ni) / Ti_ev);
          }
        }
      } else { // lam_ii'
        PetscReal mui = ctx->masses[iii] / 1.6720e-27, Zi = ctx->charges[iii] / 1.6022e-19;
        PetscReal Tj_ev            = (ctx->thermal_temps[jjj] / 1.1604525e7) * 1000; // convert (back) to eV
        PetscReal muj              = ctx->masses[jjj] / 1.6720e-27;
        PetscReal nj               = ctx->n[jjj] * ctx->n_0;
        ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(Zi * Zj * (mui + muj) / (mui * Tj_ev + muj * Ti_ev) * PetscSqrtReal(ni * Zi * Zi / Ti_ev + nj * Zj * Zj / Tj_ev));
      }
    }
  }
  //PetscReal v0 = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
  PetscFunctionReturn(PETSC_SUCCESS);
}
1054 
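/*
  Example options (illustrative values only) for a two-grid electron + deuterium setup:
    -dm_landau_thermal_temps 2,2 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
    -dm_landau_n 1,1 -dm_landau_num_species_grid 1,1 -dm_landau_device_type cpu
  -dm_landau_thermal_temps is required; the number of values given sets the number of species
*/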
1055 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1056 {
1057   PetscBool flg, fileflg;
1058   PetscInt  ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS], non_dim_grid;
1059   PetscReal lnLam = 10;
1060   DM        dummy;
1061 
1062   PetscFunctionBegin;
1063   PetscCall(DMCreate(ctx->comm, &dummy));
1064   /* get options - initialize context */
1065   ctx->verbose        = 1; // should be 0 for silent operation
1066   ctx->batch_sz       = 1;
1067   ctx->batch_view_idx = 0;
1068   ctx->interpolate    = PETSC_TRUE;
1069   ctx->gpu_assembly   = PETSC_TRUE;
1070   ctx->norm_state     = 0;
1071   ctx->electronShift  = 0;
1072   ctx->M              = NULL;
1073   ctx->J              = NULL;
1074   /* geometry and grids */
1075   ctx->sphere    = PETSC_FALSE;
1076   ctx->use_p4est = PETSC_FALSE;
1077   ctx->simplex   = PETSC_FALSE;
1078   for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1079     ctx->radius[grid]             = 5.; /* thermal radius (velocity) */
1080     ctx->radius_perp[grid]        = 5.; /* thermal radius (velocity) */
1081     ctx->radius_par[grid]         = 5.; /* thermal radius (velocity) */
1082     ctx->numAMRRefine[grid]       = 0;
1083     ctx->postAMRRefine[grid]      = 0;
1084     ctx->species_offset[grid + 1] = 1; // one species default
1085     num_species_grid[grid]        = 0;
1086     ctx->plex[grid]               = NULL; /* cache since conversion is expensive */
1087   }
1088   ctx->species_offset[0] = 0;
1089   ctx->re_radius         = 0.;
1090   ctx->vperp0_radius1    = 0;
1091   ctx->vperp0_radius2    = 0;
1092   ctx->nZRefine1         = 0;
1093   ctx->nZRefine2         = 0;
1094   ctx->numRERefine       = 0;
1095   num_species_grid[0]    = 1; // one species default
1096   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1097   ctx->charges[0]       = -1;                       /* electron charge (MKS) */
1098   ctx->masses[0]        = 1 / 1835.469965278441013; /* temporary value in units of proton mass */
1099   ctx->n[0]             = 1;
1100   ctx->v_0              = 1; /* thermal velocity, we could start with a scale != 1 */
1101   ctx->thermal_temps[0] = 1;
1102   /* constants, etc. */
1103   ctx->epsilon0 = 8.8542e-12;     /* permittivity of free space (MKS) F/m */
1104   ctx->k        = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1105   ctx->n_0      = 1.e20;          /* typical plasma n, but could set it to 1 */
1106   ctx->Ez       = 0;
1107   for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
1108   for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2;
1109   if (LANDAU_DIM == 2) ctx->cells0[0] = 1;
1110   ctx->use_matrix_mass                = PETSC_FALSE;
1111   ctx->use_relativistic_corrections   = PETSC_FALSE;
1112   ctx->use_energy_tensor_trick        = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1113   ctx->SData_d.w                      = NULL;
1114   ctx->SData_d.x                      = NULL;
1115   ctx->SData_d.y                      = NULL;
1116   ctx->SData_d.z                      = NULL;
1117   ctx->SData_d.invJ                   = NULL;
1118   ctx->jacobian_field_major_order     = PETSC_FALSE;
1119   ctx->SData_d.coo_elem_offsets       = NULL;
1120   ctx->SData_d.coo_elem_point_offsets = NULL;
1121   ctx->SData_d.coo_elem_fullNb        = NULL;
1122   ctx->SData_d.coo_size               = 0;
1123   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Planck-Landau collision operator", "none");
1124 #if defined(PETSC_HAVE_KOKKOS)
1125   ctx->deviceType = LANDAU_KOKKOS;
1126   PetscCall(PetscStrncpy(ctx->filename, "kokkos", sizeof(ctx->filename)));
1127 #else
1128   ctx->deviceType = LANDAU_CPU;
1129   PetscCall(PetscStrncpy(ctx->filename, "cpu", sizeof(ctx->filename)));
1130 #endif
1131   PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu' or 'kokkos'", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), NULL));
1132   PetscCall(PetscStrcmp("cpu", ctx->filename, &flg));
1133   if (flg) {
1134     ctx->deviceType = LANDAU_CPU;
1135   } else {
1136     PetscCall(PetscStrcmp("kokkos", ctx->filename, &flg));
1137     if (flg) ctx->deviceType = LANDAU_KOKKOS;
1138     else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", ctx->filename);
1139   }
1140   ctx->filename[0] = '\0';
1141   PetscCall(PetscOptionsString("-dm_landau_filename", "file to read mesh from", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), &fileflg));
1142   PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL));
1143   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1144   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1145   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT, (PetscInt)LANDAU_MAX_BATCH_SZ, ctx->batch_sz);
1146   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1147   PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_batch_view_idx %" PetscInt_FMT " >= ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz);
1148   PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in units of the Connor-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL));
1149   PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL));
1150   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MatAXPY to add the mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1151   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1152   PetscCall(PetscOptionsBool("-dm_landau_simplex", "Use simplex elements", "plexland.c", ctx->simplex, &ctx->simplex, NULL));
1153   if (LANDAU_DIM == 2 && ctx->use_relativistic_corrections) ctx->use_relativistic_corrections = PETSC_FALSE; // relativistic corrections are not supported in 2D; silently disabled (should warn)
1154   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
1155                              &ctx->use_energy_tensor_trick, NULL));
1156 
1157   /* get num species with temperature, set defaults */
1158   for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
1159     ctx->thermal_temps[ii] = 1;
1160     ctx->charges[ii]       = 1;
1161     ctx->masses[ii]        = 1;
1162     ctx->n[ii]             = 1;
1163   }
1164   nt = LANDAU_MAX_SPECIES;
1165   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1166   if (flg) {
1167     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt));
1168     ctx->num_species = nt;
1169   } else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1170   for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1171   nm = LANDAU_MAX_SPECIES - 1;
1172   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1173   PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1);
1174   nm = LANDAU_MAX_SPECIES;
1175   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1176   PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species);
1177   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1178   ctx->masses[0] = 9.10938356e-31;                                           /* electron mass kg (should be about right already) */
1179   nc             = LANDAU_MAX_SPECIES - 1;
1180   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1181   if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1);
1182   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1183   /* geometry and grids */
1184   nt = LANDAU_MAX_GRIDS;
1185   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg));
1186   if (flg) {
1187     ctx->num_grids = nt;
1188     for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii];
1189     PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species,
1190                ctx->num_grids, LANDAU_MAX_GRIDS);
1191   } else {
1192     if (ctx->num_species > LANDAU_MAX_GRIDS) {
1193       num_species_grid[0] = 1;
1194       num_species_grid[1] = ctx->num_species - 1;
1195       ctx->num_grids      = 2;
1196     } else {
1197       ctx->num_grids = ctx->num_species;
1198       for (ii = 0; ii < ctx->num_grids; ii++) num_species_grid[ii] = 1;
1199     }
1200   }
1201   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii];
1202   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT, ctx->species_offset[ctx->num_grids],
1203              ctx->num_species);
1204   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1205     int iii                  = ctx->species_offset[grid];                                          // normalize with first (arbitrary) species on grid
1206     ctx->thermal_speed[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
1207   }
1208   // get lambdas here because we need them for t_0 etc
1209   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Universal cross section parameter. Default uses NRL formulas", "plexland.c", lnLam, &lnLam, &flg));
1210   if (flg) {
1211     for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1212       for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) ctx->lambdas[gridj][grid] = lnLam; /* cross section ratio large - small angle collisions */
1213     }
1214   } else {
1215     PetscCall(makeLambdas(ctx));
1216   }
1217   non_dim_grid = 0;
1218   PetscCall(PetscOptionsInt("-dm_landau_normalization_grid", "Index of grid to use for setting v_0, m_0, t_0. (Not recommended)", "plexland.c", non_dim_grid, &non_dim_grid, &flg));
1219   if (non_dim_grid != 0) PetscCall(PetscInfo(dummy, "Normalization grid set to %" PetscInt_FMT ", but non-default values are not well verified\n", non_dim_grid));
1220   PetscCheck(non_dim_grid >= 0 && non_dim_grid < ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "Normalization grid wrong: %" PetscInt_FMT, non_dim_grid);
1221   ctx->v_0 = ctx->thermal_speed[non_dim_grid]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1222   ctx->m_0 = ctx->masses[non_dim_grid];        /* arbitrary reference mass, electrons */
1223   ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[non_dim_grid])) / ctx->lambdas[non_dim_grid][non_dim_grid] / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[non_dim_grid,non_dim_grid]=1 */
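  // i.e., t_0 = 8 pi eps0^2 m_0^2 v_0^3 / (n_0 q^4 ln(Lambda)), the inverse of the reference
  // collision frequency, so the [non_dim_grid][non_dim_grid] collision coefficient is 1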
1224   /* domain */
1225   nt = LANDAU_MAX_GRIDS;
1226   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg));
1227   if (flg) {
1228     PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1229     while (nt--) ctx->radius_par[nt] = ctx->radius_perp[nt] = ctx->radius[nt];
1230   } else {
1231     nt = LANDAU_MAX_GRIDS;
1232     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_par", "Parallel velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_par, &nt, &flg));
1233     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_par: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1234     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_perp", "Perpendicular velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_perp, &nt, &flg));
1235     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_perp: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1236   }
1237   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1238     if (flg && ctx->radius[grid] <= 0) { /* negative is a ratio of c - need to set par and perp with this -- todo */
1239       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1240       else ctx->radius[grid] = -ctx->radius[grid];
1241       ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1242       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid));
1243     }
1244     ctx->radius[grid] *= ctx->thermal_speed[grid] / ctx->v_0;      // scale domain by thermal radius relative to v_0
1245     ctx->radius_perp[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0
1246     ctx->radius_par[grid] *= ctx->thermal_speed[grid] / ctx->v_0;  // scale domain by thermal radius relative to v_0
1247   }
1248   /* amr parameters */
1249   if (!fileflg) {
1250     nt = LANDAU_MAX_GRIDS;
1251     PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1252     PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1253     nt = LANDAU_MAX_GRIDS;
1254     PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1255     for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1256     PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1257     PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_pre", "Number of levels to refine along v_perp=0 before origin refine", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1258     PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_post", "Number of levels to refine along v_perp=0 after origin refine", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1259     PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg));
1260     PetscCall(PetscOptionsReal("-dm_landau_z_radius_pre", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg));
1261     PetscCall(PetscOptionsReal("-dm_landau_z_radius_post", "velocity range to refine r=0 axis (for electrons) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1262     /* spherical domain (not used) */
1263     PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, NULL));
1264     if (ctx->sphere || ctx->simplex) {
1265       ctx->sphere_inner_radius_90degree = 0.40;
1266       ctx->sphere_inner_radius_45degree = 0.35;
1267       PetscCall(PetscOptionsReal("-dm_landau_sphere_inner_radius_90degree_scale", "Scaling of radius for inner circle on 90 degree grid", "plexland.c", ctx->sphere_inner_radius_90degree, &ctx->sphere_inner_radius_90degree, NULL));
1268       PetscCall(PetscOptionsReal("-dm_landau_sphere_inner_radius_45degree_scale", "Scaling of radius for inner circle on 45 degree grid", "plexland.c", ctx->sphere_inner_radius_45degree, &ctx->sphere_inner_radius_45degree, NULL));
1269     } else {
1270       nt = LANDAU_DIM;
1271       PetscCall(PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg));
1272     }
1273   }
1274   /* processing options */
1275   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1276   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1277   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1278   PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1279   PetscOptionsEnd();
1280 
1281   for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1282   if (ctx->verbose != 0) {
1283     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / 1.6720e-27), (double)(ctx->num_species > 2 ? ctx->masses[2] / 1.6720e-27 : 0)));
1284     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0)));
1285     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0)));
1286     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "thermal T (K): e=%10.3e i=%10.3e %10.3e. Normalization grid %d: v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e %" PetscInt_FMT " batched, view batch %" PetscInt_FMT "\n", (double)ctx->thermal_temps[0],
1287                           (double)ctx->thermal_temps[1], (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), (int)non_dim_grid, (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->batch_sz, ctx->batch_view_idx));
1288     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Domain radius (AMR levels) grid %d: par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius_par[0], (double)ctx->radius_perp[0], ctx->numAMRRefine[0]));
1289     for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, ", %" PetscInt_FMT ": par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius_par[ii], (double)ctx->radius_perp[ii], ctx->numAMRRefine[ii]));
1290     if (ctx->use_relativistic_corrections) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nUse relativistic corrections\n"));
1291     else PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1292   }
1293   PetscCall(DMDestroy(&dummy));
1294   {
1295     PetscMPIInt rank;
1296     PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
1297     ctx->stage = 0;
1298     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]));   /* 13 */
1299     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]));  /* 2 */
1300     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]));   /* 12 */
1301     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]));  /* 15 */
1302     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1303     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1304     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]));  /* 0 */
1305     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]));      /* 9 */
1306     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]));       /* 10 */
1307     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]));  /* 7 */
1308     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]));  /* 1 */
1309     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]));     /* 3 */
1310     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]));  /* 8 */
1311     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]));  /* 4 */
1312     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1313     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]));     /* 5 */
1314     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]));    /* 6 */
1315 
1316     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1317       PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason"));
1318       PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
1319       PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor"));
1320       PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor"));
1321       PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor"));
1322       PetscCall(PetscOptionsClearValue(NULL, "-ts_view"));
1323       PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor"));
1324       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view"));
1325       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view"));
1326       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view"));
1327       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view"));
1328       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view"));
1329       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view"));
1330       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason"));
1331       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor"));
1332       PetscCall(PetscOptionsClearValue(NULL, "-"));
1333       PetscCall(PetscOptionsClearValue(NULL, "-info"));
1334     }
1335   }
1336   PetscFunctionReturn(PETSC_SUCCESS);
1337 }
1338 
1339 static PetscErrorCode CreateStaticData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1340 {
1341   PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
1342   PetscQuadrature  quad;
1343   const PetscReal *quadWeights;
1344   PetscReal        invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1345   PetscInt         numCells[LANDAU_MAX_GRIDS], Nq, Nb, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES;
1346   PetscTabulation *Tf;
1347   PetscDS          prob;
1348 
1349   PetscFunctionBegin;
1350   PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
1351   PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
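  /* collision-frequency prefactors: nu_alpha[i] = q_i^2 / (m_0 m_i) and
     nu_beta[j] = q_j^2 t_0 n_0 / (8 pi eps0^2 v_0^3); the dimensionless coefficient of
     species pair (i,j) in the Landau kernel is nu_alpha[i] * nu_beta[j] * lambda[i][j] */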
1352   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1353     for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) {
1354       invMass[ii]  = ctx->m_0 / ctx->masses[ii];
1355       nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii];
1356       nu_beta[ii]  = PetscSqr(ctx->charges[ii] / ctx->epsilon0) / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
1357     }
1358   }
1359   if (ctx->verbose == 4) {
1360     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nu_alpha: "));
1361     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1362       int iii = ctx->species_offset[grid];
1363       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_alpha[ii]));
1364     }
1365     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_beta: "));
1366     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1367       int iii = ctx->species_offset[grid];
1368       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_beta[ii]));
1369     }
1370     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_alpha[i]*nu_beta[j]*lambda[i][j]:\n"));
1371     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1372       int iii = ctx->species_offset[grid];
1373       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1374         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1375           int jjj = ctx->species_offset[gridj];
1376           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)(nu_alpha[ii] * nu_beta[jj] * ctx->lambdas[grid][gridj])));
1377         }
1378         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1379       }
1380     }
1381     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "lambda[i][j]:\n"));
1382     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1383       int iii = ctx->species_offset[grid];
1384       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1385         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1386           int jjj = ctx->species_offset[gridj];
1387           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)ctx->lambdas[grid][gridj]));
1388         }
1389         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1390       }
1391     }
1392   }
1393   PetscCall(DMGetDS(ctx->plex[0], &prob));    // same DS for all grids
1394   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1395   /* DS, Tab and quad is same on all grids */
1396   PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1397   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1398   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights));
1399   PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
1400   /* setup each grid */
1401   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1402     PetscInt cStart, cEnd;
1403     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1404     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1405     numCells[grid] = cEnd - cStart; // grids can have different topology
1406     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1407     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1408     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1409     ncellsTot += numCells[grid];
1410   }
1411   /* create GPU assembly data */
1412   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1413     PetscContainer container;
1414     PetscScalar   *elemMatrix, *elMat;
1415     pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE];
1416     P4estVertexMaps *maps;
1417     const PetscInt  *plex_batch = NULL, elMatSz = Nb * Nb * ctx->num_species * ctx->num_species;
1418     LandauIdx       *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = NULL;
1419     /* create GPU assembly data */
1420     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1));
1421     PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0));
1422     PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps));
1423     PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps));
1424     PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix));
1425 
1426     {                                                                                                                             // setup COO assembly -- put COO metadata directly in ctx->SData_d
1427       PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1428       coo_elem_offsets[0] = 0;                                                                                                    // finish later
1429       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot));
1430       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1431       ctx->SData_d.coo_elem_offsets       = (void *)coo_elem_offsets;
1432       ctx->SData_d.coo_elem_fullNb        = (void *)coo_elem_fullNb;
1433       ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets;
1434     }
1435 
1436     ctx->SData_d.coo_max_fullnb = 0;
1437     for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1438       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nb;
1439       if (grid_batch_is_inv[grid]) PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1440       PetscCheck(!plex_batch, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1441       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1442       // make maps
1443       maps[grid].d_self       = NULL;
1444       maps[grid].num_elements = numCells[grid];
1445       maps[grid].num_face     = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001);                 // Q, the number of 1D quadrature points
1446       maps[grid].num_face     = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^{dim-1}, the number of quadrature points on a face
1447       maps[grid].num_reduced  = 0;
1448       maps[grid].deviceType   = ctx->deviceType;
1449       maps[grid].numgrids     = ctx->num_grids;
1450       // count reduced and get
1451       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1452       for (int ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1453         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1454         for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1455           int fullNb = 0;
1456           for (int q = 0; q < Nb; ++q) {
1457             PetscInt     numindices, *indices;
1458             PetscScalar *valuesOrig = elMat = elemMatrix;
1459             PetscCall(PetscArrayzero(elMat, totDim * totDim));
1460             elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1;
1461             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1462             if (ctx->simplex) {
1463               PetscCheck(numindices == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, "numindices != Nb numindices=%d Nb=%d", (int)numindices, (int)Nb);
1464               for (int q = 0; q < numindices; ++q) { maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[q]; }
1465               fullNb++;
1466             } else {
1467               for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal (is this too complicated for simplices?)
1468                 if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) {
1469                   // found it
1470                   if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1471                     if (plex_batch) {
1472                       maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)plex_batch[indices[f]];
1473                     } else {
1474                       maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1475                     }
1476                     fullNb++;
1477                   } else { //found a constraint
1478                     int            jj                = 0;
1479                     PetscReal      sum               = 0;
1480                     const PetscInt ff                = f;
1481                     maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1482                     PetscCheck(!ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "No constraints with simplex");
1483                     do {                                                                                              // constraints are continuous in Plex - exploit that here
1484                       int ii;                                                                                         // get 'scale'
1485                       for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1486                         if (ff + ii < numindices) {                                                                   // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1487                           pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]);
1488                         }
1489                       }
1490                       sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1491                       // get 'gid'
1492                       if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1493                       else {
1494                         if (plex_batch) {
1495                           pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1496                         } else {
1497                           pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1498                         }
1499                         fullNb++;
1500                       }
1501                     } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1502                     while (jj < maps[grid].num_face) {
1503                       pointMaps[maps[grid].num_reduced][jj].scale = 0;
1504                       pointMaps[maps[grid].num_reduced][jj].gid   = -1;
1505                       jj++;
1506                     }
1507                     if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug
1508                       int       d, f;
1509                       PetscReal tmp = 0;
1510                       PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face));
1511                       for (d = 0, tmp = 0; d < numindices; ++d) {
1512                         if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3d) %3" PetscInt_FMT ": ", d, indices[d]));
1513                         for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]);
1514                         if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp));
1515                       }
1516                     }
1517                     maps[grid].num_reduced++;
1518                     PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d >= %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE);
1519                   }
1520                   break;
1521                 }
1522               }
1523             } // !simplex
1524             // cleanup
1525             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1526             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat));
1527           }
1528           {                                                        // setup COO assembly
1529             coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid
1530             if (fieldA == 0) {                                     // cache full Nb for this element, on this grid per species
1531               coo_elem_fullNb[glb_elem_idx] = fullNb;
1532               if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1533             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d", coo_elem_fullNb[glb_elem_idx], fullNb);
1534           }
1535         } // field
1536       }   // cell
1537       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1538       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1539       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1540         for (int q = 0; q < maps[grid].num_face; ++q) {
1541           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1542           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1543         }
1544       }
1545 #if defined(PETSC_HAVE_KOKKOS)
1546       if (ctx->deviceType == LANDAU_KOKKOS) {
1547         PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, grid)); // implies Kokkos does
1548       }
1549 #endif
1550       if (plex_batch) {
1551         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1552         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1553       }
1554     } /* grids */
1555     // finish COO
1556     { // setup COO assembly
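      // COO layout: each element contributes, per species, a dense fullNb x fullNb block of
      // (row, col) pairs, where fullNb counts the element's global dofs including those reached
      // through constraints; coo_elem_offsets[] gives each element's starting position, and the
      // whole pattern is replicated batch_sz times (coo_size = coo_elem_offsets[ncellsTot] * batch_sz)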
1557       PetscInt *oor, *ooc;
1558       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz;
1559       PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc));
1560       for (int i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1;
1561       // get
1562       for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1563         for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1564           const int              fullNb           = coo_elem_fullNb[glb_elem_idx];
1565           const LandauIdx *const Idxs             = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should all be the same, but this is only for COO storage
1566           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1567           for (int f = 0, cnt2 = 0; f < Nb; f++) {
1568             int idx                                     = Idxs[f];
1569             coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1570             if (idx >= 0) {
1571               cnt2++;
1572               coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1573             } else {
1574               idx = -idx - 1;
1575               for (int q = 0; q < maps[grid].num_face; q++) {
1576                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1577                 cnt2++;
1578                 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1579               }
1580             }
1581             PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d", fullNb, cnt2);
1582           }
1583           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d", coo_elem_point_offsets[glb_elem_idx][Nb], fullNb);
1584         }
1585       }
1586       // set
1587       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1588         for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1589           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1590           for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1591             const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
1592             // set (i,j)
1593             for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1594               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1595               int                    rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
1596               for (int f = 0; f < Nb; ++f) {
1597                 const int nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f];
1598                 if (nr == 1) rows[0] = Idxs[f];
1599                 else {
1600                   const int idx = -Idxs[f] - 1;
1601                   for (int q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid;
1602                 }
1603                 for (int g = 0; g < Nb; ++g) {
1604                   const int nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g];
1605                   if (nc == 1) cols[0] = Idxs[g];
1606                   else {
1607                     const int idx = -Idxs[g] - 1;
1608                     for (int q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid;
1609                   }
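                  // flat COO index: batch block + element offset + species block (fullNb^2 each)
                  // + fullNb * (rows placed before point f) + nr * (columns placed before point g)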
1610                   const int idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1611                   for (int q = 0, idx = idx0; q < nr; q++) {
1612                     for (int d = 0; d < nc; d++, idx++) {
1613                       oor[idx] = rows[q] + moffset;
1614                       ooc[idx] = cols[d] + moffset;
1615                     }
1616                   }
1617                 }
1618               }
1619             }
1620           } // cell
1621         }   // grid
1622       }     // batch
1623       PetscCall(MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc));
1624       PetscCall(PetscFree2(oor, ooc));
1625     }
1626     PetscCall(PetscFree(pointMaps));
1627     PetscCall(PetscFree(elemMatrix));
1628     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1629     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1630     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1631     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container));
1632     PetscCall(PetscContainerDestroy(&container));
1633     PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0));
1634   } // end GPU assembly
1635   { /* create static point data, Jacobian called first, only one vertex copy */
1636     PetscReal *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a;
1637     PetscInt   outer_ipidx, outer_ej, grid, nip_glb = 0;
1638     PetscFE    fe;
1639     PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0));
1640     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1641     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid];
1642     /* collect f data, first time is for Jacobian, but make mass now */
1643     if (ctx->verbose != 0) {
1644       PetscInt ncells = 0, N;
1645       PetscCall(MatGetSize(ctx->J, &N, NULL));
1646       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid];
1647       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb,
1648                             ctx->num_species, Nb, dim, N));
1649     }
1650     PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a));
1651     if (dim == 3) PetscCall(PetscMalloc1(nip_glb, &zz));
1652     if (ctx->use_energy_tensor_trick) {
1653       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, NULL, PETSC_DECIDE, &fe));
1654       PetscCall(PetscObjectSetName((PetscObject)fe, "energy"));
1655     }
1656     /* init each grids static data - no batch */
1657     for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once)
1658       Vec          v2_2 = NULL;                                                    // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1659       PetscSection e_section;
1660       DM           dmEnergy;
1661       PetscInt     cStart, cEnd, ej;
1662 
1663       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1664       // prep energy trick, get v^2 / 2 vector
1665       if (ctx->use_energy_tensor_trick) {
1666         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1667         Vec        glob_v2;
1668         PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1669 
1670         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1671         PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy"));
1672         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1673         PetscCall(DMCreateDS(dmEnergy));
1674         PetscCall(DMGetSection(dmEnergy, &e_section));
1675         PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2));
1676         PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick"));
1677         c2_0[0] = &data[0];
1678         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2));
1679         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1680         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1681         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1682         PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1683         PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view"));
1684         PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view"));
1685         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1686       }
1687       /* append part of the IP data for each grid */
1688       for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) {
1689         PetscScalar *coefs = NULL;
1690         PetscReal    vj[LANDAU_MAX_NQND * LANDAU_DIM], detJj[LANDAU_MAX_NQND], Jdummy[LANDAU_MAX_NQND * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1691         invJe = invJ_a + outer_ej * Nq * dim * dim;
1692         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj));
1693         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1694         /* create static point data */
1695         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1696           const PetscInt   gidx = outer_ipidx;
1697           const PetscReal *invJ = &invJe[qj * dim * dim];
1698           ww[gidx]              = detJj[qj] * quadWeights[qj];
1699           if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1700           // get xx, yy, zz
1701           if (ctx->use_energy_tensor_trick) {
1702             double                 refSpaceDer[3], eGradPhi[3];
1703             const PetscReal *const DD = Tf[0]->T[1];
1704             const PetscReal       *Dq = &DD[qj * Nb * dim];
1705             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1706             for (int b = 0; b < Nb; ++b) {
1707               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]);
1708             }
1709             xx[gidx] = 1e10;
1710             if (ctx->use_relativistic_corrections) {
1711               double dg2_c2 = 0;
1712               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1713               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1714               dg2_c2 *= (double)c02;
1715               if (dg2_c2 >= .999) {
1716                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1717                 yy[gidx] = vj[qj * dim + 1];
1718                 if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1719                 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx]));
1720               } else {
1721                 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2);
1722                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1723                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
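                // note: for gamma = sqrt(1 + u^2/c^2), grad_u(gamma - 1) = u / (c^2 gamma) and
                // 1 - c^2 |grad(gamma - 1)|^2 = 1/gamma^2, so fact = c^2 gamma and the scaled
                // gradient recovers u in the continuum; the discrete gradient is kept for conservation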
1724               }
1725             }
1726             if (xx[gidx] == 1e10) {
1727               for (int d = 0; d < dim; ++d) {
1728                 for (int e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e];
1729               }
1730               xx[gidx] = eGradPhi[0];
1731               yy[gidx] = eGradPhi[1];
1732               if (dim == 3) zz[gidx] = eGradPhi[2];
1733             }
1734           } else {
1735             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1736             yy[gidx] = vj[qj * dim + 1];
1737             if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1738           }
1739         } /* q */
1740         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1741       } /* ej */
1742       if (ctx->use_energy_tensor_trick) {
1743         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1744         PetscCall(DMDestroy(&dmEnergy));
1745       }
1746     } /* grid */
1747     if (ctx->use_energy_tensor_trick) PetscCall(PetscFEDestroy(&fe));
1748     /* cache static data */
1749     if (ctx->deviceType == LANDAU_KOKKOS) {
1750 #if defined(PETSC_HAVE_KOKKOS)
1751       PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1752 #else
1753       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built");
1754 #endif
1755       /* free */
1756       PetscCall(PetscFree4(ww, xx, yy, invJ_a));
1757       if (dim == 3) PetscCall(PetscFree(zz));
1758     } else {                                                                                                                                                                   /* CPU version, just copy in, only use part */
1759       PetscReal *nu_alpha_p = (PetscReal *)ctx->SData_d.alpha, *nu_beta_p = (PetscReal *)ctx->SData_d.beta, *invMass_p = (PetscReal *)ctx->SData_d.invMass, *lambdas_p = NULL; // initial values are unused; overwritten by PetscMalloc4 below
1760       ctx->SData_d.w    = (void *)ww;
1761       ctx->SData_d.x    = (void *)xx;
1762       ctx->SData_d.y    = (void *)yy;
1763       ctx->SData_d.z    = (void *)zz;
1764       ctx->SData_d.invJ = (void *)invJ_a;
1765       PetscCall(PetscMalloc4(ctx->num_species, &nu_alpha_p, ctx->num_species, &nu_beta_p, ctx->num_species, &invMass_p, LANDAU_MAX_GRIDS * LANDAU_MAX_GRIDS, &lambdas_p));
1766       for (PetscInt ii = 0; ii < ctx->num_species; ii++) {
1767         nu_alpha_p[ii] = nu_alpha[ii];
1768         nu_beta_p[ii]  = nu_beta[ii];
1769         invMass_p[ii]  = invMass[ii];
1770       }
1771       ctx->SData_d.alpha   = (void *)nu_alpha_p;
1772       ctx->SData_d.beta    = (void *)nu_beta_p;
1773       ctx->SData_d.invMass = (void *)invMass_p;
1774       ctx->SData_d.lambdas = (void *)lambdas_p;
1775       for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1776         PetscReal(*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal(*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
1777         for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) { (*lambdas)[grid][gridj] = ctx->lambdas[grid][gridj]; }
1778       }
1779     }
1780     PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0));
1781   } // initialize
1782   PetscFunctionReturn(PETSC_SUCCESS);
1783 }
1784 
1785 /* < v, u > */
1786 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1787 {
1788   g0[0] = 1.;
1789 }
1790 
1791 /* < v, u >, with a distinct fake value at each call; used only to force a full sparsity pattern */
1792 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1793 {
1794   static double ttt = 1e-12;
1795   g0[0]             = ttt++;
1796 }
1797 
1798 /* < v, u > in cylindrical coordinates, weight 2 pi r */
1799 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1800 {
1801   g0[0] = 2. * PETSC_PI * x[0];
1802 }
1803 
1804 static PetscErrorCode MatrixNfDestroy(void *ptr)
1805 {
1806   PetscInt *nf = (PetscInt *)ptr;
1807   PetscFunctionBegin;
1808   PetscCall(PetscFree(nf));
1809   PetscFunctionReturn(PETSC_SUCCESS);
1810 }
1811 
1812 /*
1813  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep it sparse.
1814   - Like DMPlexLandauCreateMassMatrix. Should remove one and combine
1815   - has old support for field major ordering
1816  */
1817 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1818 {
1819   PetscInt *idxs = NULL;
1820   Mat       subM[LANDAU_MAX_GRIDS];
1821 
1822   PetscFunctionBegin;
1823   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1824     PetscFunctionReturn(PETSC_SUCCESS);
1825   }
1826   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used
1827   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs));
1828   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1829     const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid];
1830     Mat             gMat;
1831     DM              massDM;
1832     PetscDS         prob;
1833     Vec             tvec;
1834     // get "mass" matrix for reordering
1835     PetscCall(DMClone(ctx->plex[grid], &massDM));
1836     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
1837     PetscCall(DMCreateDS(massDM));
1838     PetscCall(DMGetDS(massDM, &prob));
1839     for (int ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
1840     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1841     PetscCall(DMCreateMatrix(massDM, &gMat));
1842     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
1843     PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
1844     PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
1845     PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec));
1846     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
1847     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
1848     PetscCall(DMDestroy(&massDM));
1849     PetscCall(VecDestroy(&tvec));
1850     subM[grid] = gMat;
1851     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1852       MatOrderingType rtype = MATORDERINGRCM;
1853       IS              isrow, isicol;
1854       PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol));
1855       PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid]));
1856       PetscCall(ISGetIndices(isrow, &values));
1857       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1858 #if !defined(LANDAU_SPECIES_MAJOR)
1859         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N;
1860         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1861 #else
1862         PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n;
1863         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1864 #endif
1865       }
1866       PetscCall(ISRestoreIndices(isrow, &values));
1867       PetscCall(ISDestroy(&isrow));
1868       PetscCall(ISDestroy(&isicol));
1869     }
1870   }
1871   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is));
1872   // get a block matrix
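  // The packed matrix ctx->J is block diagonal: each (batch, grid) pair gets a copy of that grid's
  // serial matrix, shifted by the row/column offset LAND_MOFFSET(b_id, grid, ...) computed below.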
1873   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1874     Mat      B = subM[grid];
1875     PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024;
1876     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1877     PetscCall(MatGetSize(B, &nloc, NULL));
1878     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1879       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1880       const PetscInt    *cols;
1881       const PetscScalar *vals;
1882       for (int i = 0; i < nloc; i++) {
1883         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
1884         if (nzl > COL_BF_SIZE) {
1885           PetscCall(PetscFree(colbuf));
1886           PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ")\n", COL_BF_SIZE, nzl, nzl));
1887           COL_BF_SIZE = nzl;
1888           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1889         }
1890         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
1891         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
1892         row = i + moffset;
1893         PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
1894         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
1895       }
1896     }
1897     PetscCall(PetscFree(colbuf));
1898   }
1899   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
1900   PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY));
1901   PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY));
1902 
1903   // debug
1904   PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
1905   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1906     Mat mat_block_order;
1907     PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute
1908     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view"));
1909     PetscCall(MatDestroy(&mat_block_order));
1910     PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch));
1911     PetscCall(VecDuplicate(X, &ctx->work_vec));
1912   }
1913 
1914   PetscFunctionReturn(PETSC_SUCCESS);
1915 }
1916 
1917 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
1918 /*@C
1919  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
1920 
1921  Collective
1922 
1923  Input Parameters:
1924  +   comm  - The MPI communicator
1925  .   dim - velocity space dimension (2 for axisymmetric, 3 for the full 3V solver)
1926  -   prefix - prefix for options (not tested)
1927 
1928  Output Parameters:
1929  +   pack  - The DM object representing the mesh
1930  .   X - A vector (user destroys)
1931  -   J - Optional matrix (object destroys)
1932 
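 Example of a typical call sequence (a minimal sketch; error handling and the time stepping, which
 would normally use DMPlexLandauIFunction()/DMPlexLandauIJacobian() with a TS, are omitted):
.vb
  DM  pack;
  Vec X;
  Mat J;
  PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack)); // dim must match LANDAU_DIM
  // ... advance the state X in time ...
  PetscCall(VecDestroy(&X)); // the user destroys X; J is destroyed with the space
  PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
.ve
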
1933  Level: beginner
1934 
1935  .keywords: mesh
1936 .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
1937  @*/
1938 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
1939 {
1940   LandauCtx *ctx;
1941   Vec        Xsub[LANDAU_MAX_GRIDS];
1942   IS         grid_batch_is_inv[LANDAU_MAX_GRIDS];
1943 
1944   PetscFunctionBegin;
1945   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
1946   PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
1947   PetscCall(PetscNew(&ctx));
1948   ctx->comm = comm; /* used for diagnostics and global errors */
1949   /* process options */
1950   PetscCall(ProcessOptions(ctx, prefix));
1951   if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE;
1952   /* Create Mesh */
1953   PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack));
1954   PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0));
1955   PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0));
1956   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR)
1957   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1958     /* create FEM */
1959     PetscCall(SetupDS(ctx->plex[grid], dim, grid, ctx));
1960     /* set initial state */
1961     PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid]));
1962     PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig"));
1963     /* initial static refinement, no solve */
1964     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
1965     /* forest refinement - forest goes in (if forest), plex comes out */
1966     if (ctx->use_p4est) {
1967       DM plex;
1968       PetscCall(adapt(grid, ctx, &Xsub[grid]));                                      // forest goes in, plex comes out
1969       PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view")); // need to differentiate - todo
1970       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
1971       // convert to plex, all done with this level
1972       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
1973       PetscCall(DMDestroy(&ctx->plex[grid]));
1974       ctx->plex[grid] = plex;
1975     }
1976 #if !defined(LANDAU_SPECIES_MAJOR)
1977     PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1978 #else
1979     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1980       PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1981     }
1982 #endif
1983     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
1984   }
1985 #if !defined(LANDAU_SPECIES_MAJOR)
1986   // stack the batched DMs (b_id = 0 was added above; this could all be done here)
1987   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
1988     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
1989   }
1990 #endif
1991   // create ctx->mat_offset
1992   ctx->mat_offset[0] = 0;
1993   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1994     PetscInt n;
1995     PetscCall(VecGetLocalSize(Xsub[grid], &n));
1996     ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n;
1997   }
1998   // create DM & Jacobian
1999   PetscCall(DMSetApplicationContext(*pack, ctx));
2000   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2001   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2002   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2003   PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2004   PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2005   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2006   // construct initial conditions in X
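  // Each grid's initial condition is built in its own sub-vector and then copied into the packed
  // global vector at the (batch, grid) offset given by LAND_MOFFSET().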
2007   PetscCall(DMCreateGlobalVector(*pack, X));
2008   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2009     PetscInt n;
2010     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2011     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2012       PetscScalar const *values;
2013       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2014       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx));
2015       PetscCall(VecGetArrayRead(Xsub[grid], &values)); // Drop whole grid in Plex ordering
2016       for (int i = 0, idx = moffset; i < n; i++, idx++) PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES));
2017       PetscCall(VecRestoreArrayRead(Xsub[grid], &values));
2018     }
2019   }
2020   // cleanup
2021   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(VecDestroy(&Xsub[grid]));
2022   /* check for correct matrix type */
2023   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2024     PetscBool flg;
2025     if (ctx->deviceType == LANDAU_KOKKOS) {
2026       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, ""));
2027 #if defined(PETSC_HAVE_KOKKOS)
2028       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2029 #else
2030       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2031 #endif
2032     }
2033   }
2034   PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0));
2035 
2036   // create field major ordering
2037   ctx->work_vec   = NULL;
2038   ctx->plex_batch = NULL;
2039   ctx->batch_is   = NULL;
2040   for (int i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL;
2041   PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0));
2042   PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx));
2043   PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0));
2044 
2045   // create AMR GPU assembly maps and static GPU data
2046   PetscCall(CreateStaticData(dim, grid_batch_is_inv, ctx));
2047 
2048   PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0));
2049 
2050   // create mass matrix
2051   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2052 
2053   if (J) *J = ctx->J;
2054 
2055   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2056     PetscContainer container;
2057     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2058     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2059     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2060     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container));
2061     PetscCall(PetscContainerDestroy(&container));
2062     // batch solvers need the ordering map; compose it on J so they can retrieve it
2063     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2064     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2065     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container));
2066     PetscCall(PetscContainerDestroy(&container));
2067   }
2068   // for batch solvers
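  // A batched solver can retrieve the batch size later with, e.g. (illustrative, not used here):
  //   PetscContainer c; PetscInt *pNf;
  //   PetscCall(PetscObjectQuery((PetscObject)J, "batch size", (PetscObject *)&c));
  //   if (c) PetscCall(PetscContainerGetPointer(c, (void **)&pNf));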
2069   {
2070     PetscContainer container;
2071     PetscInt      *pNf;
2072     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2073     PetscCall(PetscMalloc1(sizeof(*pNf), &pNf));
2074     *pNf = ctx->batch_sz;
2075     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2076     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2077     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container));
2078     PetscCall(PetscContainerDestroy(&container));
2079   }
2080 
2081   PetscFunctionReturn(PETSC_SUCCESS);
2082 }
2083 
2084 /*@
2085  DMPlexLandauAccess - Access to the distribution function with user callback
2086 
2087  Collective
2088 
2089  Input Parameters:
2090  +   pack - the DMComposite
2091  .   func - callback function
2092  -   user_ctx - user context
2093 
2094  Input/Output Parameter:
2095  .   X - the vector to add data to
2096 
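 Example of a user callback (a sketch; the constant fill is only for illustration):
.vb
  static PetscErrorCode MyFill(DM vdm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user_ctx)
  {
    PetscFunctionBegin;
    // 'vec' is a global vector on the single-field sub-DM 'vdm' for species 'i0' of 'grid', batch 'b_id'
    PetscCall(VecSet(vec, 1.0));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
.ve
 used as: PetscCall(DMPlexLandauAccess(pack, X, MyFill, NULL));
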
2097  Level: advanced
2098 
2099  .keywords: mesh
2100 .seealso: `DMPlexLandauCreateVelocitySpace()`
2101  @*/
2102 PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx)
2103 {
2104   LandauCtx *ctx;
2105   PetscFunctionBegin;
2106   PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2107   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2108     PetscInt dim, n;
2109     PetscCall(DMGetDimension(pack, &dim));
2110     for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
2111       Vec      vec;
2112       PetscInt vf[1] = {i0};
2113       IS       vis;
2114       DM       vdm;
2115       PetscCall(DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm));
2116       PetscCall(DMSetApplicationContext(vdm, ctx)); // the user might want this
2117       PetscCall(DMCreateGlobalVector(vdm, &vec));
2118       PetscCall(VecGetSize(vec, &n));
2119       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2120         const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2121         PetscCall(VecZeroEntries(vec));
2122         /* Add your data with 'dm' for species 'sp' to 'vec' */
2123         PetscCall(func(vdm, vec, i0, grid, b_id, user_ctx));
2124         /* add to global */
2125         PetscScalar const *values;
2126         const PetscInt    *offsets;
2127         PetscCall(VecGetArrayRead(vec, &values));
2128         PetscCall(ISGetIndices(vis, &offsets));
2129         for (int i = 0; i < n; i++) PetscCall(VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES));
2130         PetscCall(VecRestoreArrayRead(vec, &values));
2131         PetscCall(ISRestoreIndices(vis, &offsets));
2132       } // batch
2133       PetscCall(VecDestroy(&vec));
2134       PetscCall(ISDestroy(&vis));
2135       PetscCall(DMDestroy(&vdm));
2136     }
2137   } // grid
2138   PetscFunctionReturn(PETSC_SUCCESS);
2139 }
2140 
2141 /*@
2142  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2143 
2144  Collective
2145 
2146  Input/Output Parameter:
2147  .   dm - the DM to destroy
2148 
2149  Level: beginner
2150 
2151  .keywords: mesh
2152 .seealso: `DMPlexLandauCreateVelocitySpace()`
2153  @*/
2154 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2155 {
2156   LandauCtx *ctx;
2157   PetscFunctionBegin;
2158   PetscCall(DMGetApplicationContext(*dm, &ctx));
2159   PetscCall(MatDestroy(&ctx->M));
2160   PetscCall(MatDestroy(&ctx->J));
2161   for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2162   PetscCall(ISDestroy(&ctx->batch_is));
2163   PetscCall(VecDestroy(&ctx->work_vec));
2164   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2165   if (ctx->deviceType == LANDAU_KOKKOS) {
2166 #if defined(PETSC_HAVE_KOKKOS)
2167     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2168 #else
2169     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s not built", "kokkos");
2170 #endif
2171   } else {
2172     if (ctx->SData_d.x) { /* in a CPU run */
2173       PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
2174       LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets;
2175       PetscCall(PetscFree4(ww, xx, yy, invJ));
2176       if (zz) PetscCall(PetscFree(zz));
2177       if (coo_elem_offsets) {
2178         PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL
2179       }
2180       PetscCall(PetscFree4(ctx->SData_d.alpha, ctx->SData_d.beta, ctx->SData_d.invMass, ctx->SData_d.lambdas));
2181     }
2182   }
2183 
2184   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2185     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE]));
2186     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz));
2187     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL]));
2188     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN]));
2189     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n", ctx->times[LANDAU_OPERATOR]));
2190     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n", ctx->times[LANDAU_MASS]));
2191     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n", ctx->times[LANDAU_F_DF]));
2192     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n", ctx->times[LANDAU_KERNEL]));
2193     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n", ctx->times[KSP_FACTOR]));
2194     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n", ctx->times[KSP_SOLVE]));
2195   }
2196   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMDestroy(&ctx->plex[grid]));
2197   PetscCall(PetscFree(ctx));
2198   PetscCall(DMDestroy(dm));
2199   PetscFunctionReturn(PETSC_SUCCESS);
2200 }
2201 
2202 /* < v, u > (density) */
2203 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2204 {
2205   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2206   f0[0]       = u[ii];
2207 }
2208 
2209 /* < v, x_j u > (momentum in direction j) */
2210 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2211 {
2212   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2213   f0[0] = x[jj] * u[ii]; /* x momentum */
2214 }
2215 
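/* < v, |v|^2 u > (energy) */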
2216 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2217 {
2218   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2219   double   tmp1 = 0.;
2220   for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2221   f0[0] = tmp1 * u[ii];
2222 }
2223 
2224 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2225 {
2226   const PetscReal *c2_0_arr = ((PetscReal *)actx);
2227   const PetscReal  c02      = c2_0_arr[0];
2228 
2229   PetscFunctionBegin;
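  /* gamma = sqrt(1 + |u|^2/c^2); the optimized branch computes gamma - 1 via the algebraically
     equivalent xx / (sqrt(1 + xx) + 1), which avoids cancellation for small xx = |u|^2/c^2 */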
2230   for (int s = 0; s < Nf; s++) {
2231     PetscReal tmp1 = 0.;
2232     for (int i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2233 #if defined(PETSC_USE_DEBUG)
2234     u[s] = PetscSqrtReal(1. + tmp1 / c02); //  u[0] = PetscSqrtReal(1. + xx);
2235 #else
2236     {
2237       PetscReal xx = tmp1 / c02;
2238       u[s]         = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.)
2239     }
2240 #endif
2241   }
2242   PetscFunctionReturn(PETSC_SUCCESS);
2243 }
2244 
2245 /* < v, 2 pi r u > (density, cylindrical) */
2246 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2247 {
2248   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2249   f0[0]       = 2. * PETSC_PI * x[0] * u[ii];
2250 }
2251 
2252 /* < v, 2 pi r z u > (z-momentum, cylindrical) */
2253 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2254 {
2255   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2256   f0[0]       = 2. * PETSC_PI * x[0] * x[1] * u[ii];
2257 }
2258 
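/* < v, 2 pi r |v|^2 u > (energy, cylindrical) */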
2259 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2260 {
2261   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2262   f0[0]       = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii];
2263 }
2264 
2265 /*@
2266  DMPlexLandauPrintNorms - collects moments and prints them
2267 
2268  Collective
2269 
2270  Input Parameters:
2271  +   X  - the state
2272  -   stepi - current step to print
2273 
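 Note:
 For each species this prints the moments of the distribution: charge density <q f>, momentum <m v_j f>,
 and energy <m |v|^2 f>/2, scaled by the normalization constants n_0 and v_0; in 2D the integrals carry
 the cylindrical 2 pi r weight.
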
2274  Level: beginner
2275 
2276  .keywords: mesh
2277 .seealso: `DMPlexLandauCreateVelocitySpace()`
2278  @*/
2279 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2280 {
2281   LandauCtx  *ctx;
2282   PetscDS     prob;
2283   DM          pack;
2284   PetscInt    cStart, cEnd, dim, ii, i0, nDMs;
2285   PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES];
2286   PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2287   Vec        *globXArray;
2288 
2289   PetscFunctionBegin;
2290   PetscCall(VecGetDM(X, &pack));
2291   PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2292   PetscCall(DMGetDimension(pack, &dim));
2293   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim);
2294   PetscCall(DMGetApplicationContext(pack, &ctx));
2295   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2296   /* print momentum and energy */
2297   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
2298   PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz);
2299   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
2300   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2301   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2302     Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2303     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2304     for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
2305       PetscScalar user[2] = {(PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2306       PetscCall(PetscDSSetConstants(prob, 2, user));
2307       if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */
2308         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2309         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2310         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2311         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2312         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2313         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2314         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2315         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2316         energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2317         zmomentumtot += zmomentum[ii];
2318         energytot += energy[ii];
2319         densitytot += density[ii];
2320         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2321       } else { /* 2/3X + 3V */
2322         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2323         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2324         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2325         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2326         user[1] = 0;
2327         PetscCall(PetscDSSetConstants(prob, 2, user));
2328         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2329         xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2330         user[1]       = 1;
2331         PetscCall(PetscDSSetConstants(prob, 2, user));
2332         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2333         ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2334         user[1]       = 2;
2335         PetscCall(PetscDSSetConstants(prob, 2, user));
2336         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2337         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2338         if (ctx->use_relativistic_corrections) {
2339           /* gamma * M * f */
2340           if (ii == 0 && grid == 0) { // do all at once
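            // Relativistic energy: E_s = m_s n_0 v_0^2 (gamma, M f)_s. gamma is projected onto every
            // grid and dotted with M*f species-by-species, so this is done once for the whole pack.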
2341             Vec Mf, globGamma, *globMfArray, *globGammaArray;
2342             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f};
2343             PetscReal *c2_0[1], data[1];
2344 
2345             PetscCall(VecDuplicate(X, &globGamma));
2346             PetscCall(VecDuplicate(X, &Mf));
2347             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray));
2348             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray));
2349             /* M * f */
2350             PetscCall(MatMult(ctx->M, X, Mf));
2351             /* gamma */
2352             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2353             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside the grid loop, to print nicely; needs fixing for batching
2354               Vec v1  = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2355               data[0] = PetscSqr(C_0(ctx->v_0));
2356               c2_0[0] = &data[0];
2357               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1));
2358             }
2359             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2360             /* gamma * Mf */
2361             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2362             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2363             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside the grid loop, to print nicely
2364               PetscInt Nf    = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs;
2365               Vec      Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2;
2366               // get each component
2367               PetscCall(VecGetSize(Mfsub, &N));
2368               PetscCall(VecCreate(ctx->comm, &v1));
2369               PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf));
2370               PetscCall(VecCreate(ctx->comm, &v2));
2371               PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf));
2372               PetscCall(VecSetFromOptions(v1)); // ???
2373               PetscCall(VecSetFromOptions(v2));
2374               // get each component
2375               PetscCall(VecGetBlockSize(Gsub, &bs));
2376               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf);
2377               PetscCall(VecGetBlockSize(Mfsub, &bs));
2378               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf);
2379               for (int i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) {
2380                 PetscScalar val;
2381                 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO
2382                 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES));
2383                 PetscCall(VecDot(v1, v2, &val));
2384                 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix];
2385               }
2386               PetscCall(VecDestroy(&v1));
2387               PetscCall(VecDestroy(&v2));
2388             } /* grids */
2389             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2390             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2391             PetscCall(PetscFree(globGammaArray));
2392             PetscCall(PetscFree(globMfArray));
2393             PetscCall(VecDestroy(&globGamma));
2394             PetscCall(VecDestroy(&Mf));
2395           }
2396         } else {
2397           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2398           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2399           energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2400         }
2401         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2402         xmomentumtot += xmomentum[ii];
2403         ymomentumtot += ymomentum[ii];
2404         zmomentumtot += zmomentum[ii];
2405         energytot += energy[ii];
2406         densitytot += density[ii];
2407       }
2408       if (ctx->num_species > 1) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2409     }
2410   }
2411   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2412   PetscCall(PetscFree(globXArray));
2413   /* totals */
2414   PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd));
2415   if (ctx->num_species > 1) {
2416     if (dim == 2) {
2417       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2418                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2419     } else {
2420       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2421                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2422     }
2423   } else PetscCall(PetscPrintf(PETSC_COMM_WORLD, " -- %" PetscInt_FMT " cells", cEnd - cStart));
2424   PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2425   PetscFunctionReturn(PETSC_SUCCESS);
2426 }
2427 
2428 /*@
2429  DMPlexLandauCreateMassMatrix - Create the mass matrix for Landau in Plex space (not the field-major ordering of the Jacobian)
2430   - puts the mass matrix into ctx->M
2431 
2432  Collective
2433 
2434  Input Parameter:
2435 . pack     - the DM object (the matrix is stored in the Landau context M field)
2436 
2437  Output Parameter:
2438 . Amat - The mass matrix (optional); the mass matrix is also stored in the Landau context
2439 
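 Note:
 This is called from DMPlexLandauCreateVelocitySpace(), so users normally do not need to call it
 directly; pass NULL for Amat when only the copy kept in the Landau context is needed.
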
2440  Level: beginner
2441 
2442  .keywords: mesh
2443 .seealso: `DMPlexLandauCreateVelocitySpace()`
2444  @*/
2445 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2446 {
2447   DM         mass_pack, massDM[LANDAU_MAX_GRIDS];
2448   PetscDS    prob;
2449   PetscInt   ii, dim, N1 = 1, N2;
2450   LandauCtx *ctx;
2451   Mat        packM, subM[LANDAU_MAX_GRIDS];
2452 
2453   PetscFunctionBegin;
2454   PetscValidHeaderSpecific(pack, DM_CLASSID, 1);
2455   if (Amat) PetscValidPointer(Amat, 2);
2456   PetscCall(DMGetApplicationContext(pack, &ctx));
2457   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2458   PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0));
2459   PetscCall(DMGetDimension(pack, &dim));
2460   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack));
2461   /* create pack mass matrix */
2462   for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) {
2463     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2464     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2465     PetscCall(DMCreateDS(massDM[grid]));
2466     PetscCall(DMGetDS(massDM[grid], &prob));
2467     for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) {
2468       if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2469       else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2470     }
2471 #if !defined(LANDAU_SPECIES_MAJOR)
2472     PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2473 #else
2474     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2475       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2476     }
2477 #endif
2478     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2479   }
2480 #if !defined(LANDAU_SPECIES_MAJOR)
2481   // stack the batched DMs
2482   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2483     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2484   }
2485 #endif
2486   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2487   PetscCall(DMCreateMatrix(mass_pack, &packM));
2488   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2489   PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2490   PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2491   PetscCall(DMDestroy(&mass_pack));
2492   /* make mass matrix for each block */
2493   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2494     Vec locX;
2495     DM  plex = massDM[grid];
2496     PetscCall(DMGetLocalVector(plex, &locX));
2497     /* Mass matrix is independent of the input, so no need to fill locX */
2498     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2499     PetscCall(DMRestoreLocalVector(plex, &locX));
2500     PetscCall(DMDestroy(&massDM[grid]));
2501   }
2502   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2503   PetscCall(MatGetSize(packM, &N2, NULL));
2504   PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2);
2505   /* assemble block diagonals */
2506   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2507     Mat      B = subM[grid];
2508     PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row;
2509     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2510     PetscCall(MatGetSize(B, &nloc, NULL));
2511     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2512       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2513       const PetscInt    *cols;
2514       const PetscScalar *vals;
2515       for (int i = 0; i < nloc; i++) {
2516         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
2517         if (nzl > COL_BF_SIZE) {
2518           PetscCall(PetscFree(colbuf));
2519           PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ")\n", COL_BF_SIZE, nzl, nzl));
2520           COL_BF_SIZE = nzl;
2521           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2522         }
2523         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
2524         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
2525         row = i + moffset;
2526         PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
2527         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
2528       }
2529     }
2530     PetscCall(PetscFree(colbuf));
2531   }
2532   // cleanup
2533   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
2534   PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY));
2535   PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY));
2536   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2537   PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view"));
2538   ctx->M = packM;
2539   if (Amat) *Amat = packM;
2540   PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0));
2541   PetscFunctionReturn(PETSC_SUCCESS);
2542 }
2543 
2544 /*@
2545  DMPlexLandauIFunction - TS residual calculation; note that, somewhat confusingly, this also forms the collision Jacobian (without the mass term)
2546 
2547  Collective
2548 
2549  Input Parameters:
2550 +   ts  - The time stepping context
2551 .   time_dummy - current time (not used)
2552 .   X - Current state
2553 .   X_t - Time derivative of current state
2554 -   actx - Landau context
2555 
2556  Output Parameter:
2557 .   F  - The residual
2558 
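 Note:
 The residual assembled is F = C(X) X + M X_t, where C(X) is the collision matrix cached in the Landau
 context and M is the mass matrix; C is only re-formed when the state of the cached matrix has changed.
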
2559  Level: beginner
2560 
2561  .keywords: mesh
2562 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2563  @*/
2564 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2565 {
2566   LandauCtx *ctx = (LandauCtx *)actx;
2567   PetscInt   dim;
2568   DM         pack;
2569 #if defined(PETSC_HAVE_THREADSAFETY)
2570   double starttime, endtime;
2571 #endif
2572   PetscObjectState state;
2573 
2574   PetscFunctionBegin;
2575   PetscCall(TSGetDM(ts, &pack));
2576   PetscCall(DMGetApplicationContext(pack, &ctx));
2577   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2578   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2579   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2580   PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0));
2581 #if defined(PETSC_HAVE_THREADSAFETY)
2582   starttime = MPI_Wtime();
2583 #endif
2584   PetscCall(DMGetDimension(pack, &dim));
2585   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2586   if (state != ctx->norm_state) {
2587     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n", (double)time_dummy, ctx->norm_state, state));
2588     PetscCall(MatZeroEntries(ctx->J));
2589     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx));
2590     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2591     PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2592     ctx->norm_state = state;
2593   } else {
2594     PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state));
2595   }
2596   /* mat vec for op */
2597   PetscCall(MatMult(ctx->J, X, F)); /* C*f */
2598   /* add time term */
2599   if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F));
2600 #if defined(PETSC_HAVE_THREADSAFETY)
2601   if (ctx->stage) {
2602     endtime = MPI_Wtime();
2603     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2604     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2605     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2606     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2607   }
2608 #endif
2609   PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0));
2610   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2611   if (ctx->stage) PetscCall(PetscLogStagePop());
2612   PetscFunctionReturn(PETSC_SUCCESS);
2613 }
2614 
2615 /*@
2616  DMPlexLandauIJacobian - TS Jacobian construction; note that, somewhat confusingly, this mainly adds the shifted mass term to the cached collision Jacobian
2617 
2618  Collective
2619 
2620  Input Parameters:
2621 +   ts  - The time stepping context
2622 .   time_dummy - current time (not used)
2623 .   X - Current state
2624 .   U_tdummy - Time derivative of current state (not used)
2625 .   shift - shift for du/dt term
2626 -   actx - Landau context
2627 
2628  Output Parameters:
2629 +   Amat  - Jacobian
2630 -   Pmat  - same as Amat
2631 
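 Note:
 The matrix produced is J = C(X) + shift*M: with a precomputed mass matrix the mass term is added via
 MatAXPY(), otherwise the shifted mass term is folded into the kernel that forms the Jacobian.
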
2632  Level: beginner
2633 
2634  .keywords: mesh
2635 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2636  @*/
2637 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2638 {
2639   LandauCtx *ctx = NULL;
2640   PetscInt   dim;
2641   DM         pack;
2642 #if defined(PETSC_HAVE_THREADSAFETY)
2643   double starttime, endtime;
2644 #endif
2645   PetscObjectState state;
2646 
2647   PetscFunctionBegin;
2648   PetscCall(TSGetDM(ts, &pack));
2649   PetscCall(DMGetApplicationContext(pack, &ctx));
2650   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2651   PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2652   PetscCall(DMGetDimension(pack, &dim));
2653   /* get collision Jacobian into A */
2654   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2655   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2656   PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0));
2657 #if defined(PETSC_HAVE_THREADSAFETY)
2658   starttime = MPI_Wtime();
2659 #endif
2660   PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n", (double)time_dummy, (double)shift));
2661   PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift");
2662   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2663   PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "", ctx->norm_state, state);
2664   if (!ctx->use_matrix_mass) {
2665     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx));
2666   } else { /* add mass */
2667     PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN));
2668   }
2669 #if defined(PETSC_HAVE_THREADSAFETY)
2670   if (ctx->stage) {
2671     endtime = MPI_Wtime();
2672     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2673     ctx->times[LANDAU_MASS] += (endtime - starttime);
2674     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2675   }
2676 #endif
2677   PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0));
2678   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2679   if (ctx->stage) PetscCall(PetscLogStagePop());
2680   PetscFunctionReturn(PETSC_SUCCESS);
2681 }
2682