xref: /petsc/src/ts/utils/dmplexlandau/plexland.c (revision d2522c19e8fa9bca20aaca277941d9a63e71db6a)
1 #include <../src/mat/impls/aij/seq/aij.h>
2 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
3 #include <petsclandau.h>              /*I "petsclandau.h"   I*/
4 #include <petscts.h>
5 #include <petscdmforest.h>
6 #include <petscdmcomposite.h>
7 
8 /* Landau collision operator */
9 
10 /* relativistic terms */
11 #if defined(PETSC_USE_REAL_SINGLE)
12 #define SPEED_OF_LIGHT 2.99792458e8F
13 #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
14 #else
15 #define SPEED_OF_LIGHT 2.99792458e8
16 #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
17 #endif
18 
19 #define PETSC_THREAD_SYNC
20 #include "land_tensors.h"
21 
22 #if defined(PETSC_HAVE_OPENMP)
23 #include <omp.h>
24 #endif
25 
26 static PetscErrorCode LandauGPUMapsDestroy(void *ptr) {
27   P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
28   PetscFunctionBegin;
29   // free device data
30   if (maps[0].deviceType != LANDAU_CPU) {
31 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
32     if (maps[0].deviceType == LANDAU_KOKKOS) {
33       PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // a Kokkos build implies Kokkos does the free
34     }                                                                // else could be CUDA
35 #elif defined(PETSC_HAVE_CUDA)
36     if (maps[0].deviceType == LANDAU_CUDA) {
37       PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids));
38     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %d ?????", maps->deviceType);
39 #endif
40   }
41   // free host data
42   for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
43     PetscCall(PetscFree(maps[grid].c_maps));
44     PetscCall(PetscFree(maps[grid].gIdx));
45   }
46   PetscCall(PetscFree(maps));
47 
48   PetscFunctionReturn(0);
49 }
50 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) {
51   PetscReal v2 = 0;
52   PetscFunctionBegin;
53   /* compute v^2 */
54   for (int i = 0; i < dim; ++i) v2 += x[i] * x[i];
55   /* store the kinetic energy, v^2/2 */
56   u[0] = v2 / 2;
57   PetscFunctionReturn(0);
58 }
59 
60 /* needs double */
61 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) {
62   PetscReal *c2_0_arr = ((PetscReal *)actx);
63   double     u2 = 0, c02 = (double)*c2_0_arr, xx;
64 
65   PetscFunctionBegin;
66   /* compute u^2 */
67   for (int i = 0; i < dim; ++i) u2 += x[i] * x[i];
68   /* gamma - 1, evaluated in a well-conditioned form; only its derivatives are used */
69   xx = u2 / c02;
70 #if defined(PETSC_USE_DEBUG)
71   u[0] = PetscSqrtReal(1. + xx);
72 #else
73   u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the trailing -1 is a constant shift and only derivatives are used
74 #endif
75   PetscFunctionReturn(0);
76 }
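
/* A worked form of the conditioning trick used above (standard algebra, noted for clarity):
   with $x = u^2/c_0^2$, $\gamma - 1 = \sqrt{1+x} - 1 = x/(\sqrt{1+x}+1)$, which avoids the
   cancellation of $\sqrt{1+x} - 1$ for $x \ll 1$; the trailing -1 (and the plain sqrt in the
   debug branch) only shift the field by a constant, which drops out of the derivatives. */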
77 
78 /*
79  LandauFormJacobian_Internal - Evaluates Jacobian matrix.
80 
81  Input Parameters:
82  +  a_X - input vector
83  .  dim - dimension
84  .  shift - mass matrix shift (0 when computing the Jacobian)
85  -  a_ctx - Landau context
86  Output Parameter:
87  .  JacP - Jacobian matrix (filled here, not created)
88  */
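/* Flow (summary): device builds delegate the work to LandauCUDAJacobian()/LandauKokkosJacobian();
   the CPU path gathers per-cell coefficients (cellClosure, or raw xdata through the assembly maps),
   evaluates f and grad f at the quadrature points, accumulates the Landau tensor contractions g2/g3
   (or the mass weight g0 when shift != 0), and assembles the element matrices into JacP via
   DMPlexMatSetClosure, MatSetValues with the assembly maps, or MatSetValuesCOO. */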
89 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx) {
90   LandauCtx         *ctx = (LandauCtx *)a_ctx;
91   PetscInt           numCells[LANDAU_MAX_GRIDS], Nq, Nb;
92   PetscQuadrature    quad;
93   PetscReal          Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
94   PetscScalar       *cellClosure = NULL;
95   const PetscScalar *xdata       = NULL;
96   PetscDS            prob;
97   PetscContainer     container;
98   P4estVertexMaps   *maps;
99   Mat                subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];
100 
101   PetscFunctionBegin;
102   PetscValidHeaderSpecific(a_X, VEC_CLASSID, 1);
103   PetscValidHeaderSpecific(JacP, MAT_CLASSID, 2);
104   PetscValidPointer(ctx, 5);
105   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
106   PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
107   PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0));
108   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
109   PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
110   if (container) {
111     PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly");
112     PetscCall(PetscContainerGetPointer(container, (void **)&maps));
113     PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container");
114     for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
115   } else {
116     PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly");
117     for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
118       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)])); }
119     }
120     maps = NULL;
121   }
122   // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
123   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
124   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
125   Nb = Nq;
126   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
127   // get metadata for collecting dynamic data
128   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
129     PetscInt cStart, cEnd;
130     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
131     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
132     numCells[grid] = cEnd - cStart; // grids can have different topology
133   }
134   PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0));
135   if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
136     DM pack;
137     PetscCall(VecGetDM(a_X, &pack));
138     PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
139     PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0));
140     for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
141       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
142       if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                  /* add the 2pi term that is not in Landau */
143     }
144     if (!ctx->gpu_assembly) {
145       Vec         *locXArray, *globXArray;
146       PetscScalar *cellClosure_it;
147       PetscInt     cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
148       PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
149       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
150         PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
151         PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
152         PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
153       }
154       /* count cellClosure size */
155       PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
156       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
157       PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure));
158       cellClosure_it = cellClosure;
159       PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray));
160       PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
161       PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
162       PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
163       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
164         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
165           Vec      locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
166           PetscInt cStart, cEnd, ei;
167           PetscCall(VecDuplicate(locX, &locX2));
168           PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
169           PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2));
170           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
171           for (ei = cStart; ei < cEnd; ++ei) {
172             PetscScalar *coef = NULL;
173             PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
174             PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
175             PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
176             cellClosure_it += Nb * Nf[grid];
177           }
178           PetscCall(VecDestroy(&locX2));
179         }
180       }
181       PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, (PetscCount)(cellClosure_it - cellClosure),
182                  cellClosure_sz * ctx->batch_sz);
183       PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
184       PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
185       PetscCall(PetscFree(locXArray));
186       PetscCall(PetscFree(globXArray));
187       xdata = NULL;
188     } else {
189       PetscMemType mtype;
190       if (ctx->jacobian_field_major_order) { // get data in batch ordering
191         PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
192         PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
193         PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype));
194       } else {
195         PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype));
196       }
197       PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij");
198       cellClosure = NULL;
199     }
200     PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0));
201   } else xdata = cellClosure = NULL;
202 
203   /* do it */
204   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
205     if (ctx->deviceType == LANDAU_CUDA) {
206 #if defined(PETSC_HAVE_CUDA)
207       PetscCall(LandauCUDAJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
208 #else
209       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "cuda");
210 #endif
211     } else if (ctx->deviceType == LANDAU_KOKKOS) {
212 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
213       PetscCall(LandauKokkosJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
214 #else
215       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
216 #endif
217     }
218   } else {               /* CPU version */
219     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
220     PetscInt         ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
221     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
222     PetscReal        Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
223     PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
224     PetscScalar     *coo_vals = NULL;
225     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
226       PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
227       PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
228       PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
229     }
230     /* count IPf size, etc */
231     PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf and Df are the same for all grids
232     const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
233     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
234     for (PetscInt grid = 0; grid < num_grids; grid++) {
235       PetscInt nfloc        = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
236       elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
237       ip_offset[grid + 1]   = ip_offset[grid] + numCells[grid] * Nq;
238       ipf_offset[grid + 1]  = ipf_offset[grid] + Nq * nfloc * numCells[grid];
239     }
240     IPf_sz_glb = ipf_offset[num_grids];
241     IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
242     // prep COO
243     if (ctx->coo_assembly) {
244       PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time?
245       PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n", (PetscInt)ctx->SData_d.coo_size));
246     }
247     if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */
248 #if defined(PETSC_HAVE_THREADSAFETY)
249       double starttime, endtime;
250       starttime = MPI_Wtime();
251 #endif
252       PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0));
253       for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
254         invMass[fieldA] = ctx->m_0 / ctx->masses[fieldA];
255         Eq_m[fieldA]    = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
256         if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                     /* add the 2pi term that is not in Landau */
257         nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA] / ctx->m_0) * ctx->m_0 / ctx->masses[fieldA];
258         nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA] / ctx->epsilon0) * ctx->lnLam / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
259       }
260       PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim == 3 ? IPf_sz_tot : 0, &dudz));
261       // F df/dx
262       for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) {                        // for each element
263         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
264         // find my grid:
265         PetscInt       grid = 0;
266         while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid
267         {
268           const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
269           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
270           PetscScalar   *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQ];
271           PetscReal     *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data, stored for batch 0 and shared by all batches
272           PetscInt       b, f, q;
273           if (cellClosure) {
274             coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
275           } else {
276             coef = coef_buff;
277             for (f = 0; f < loc_Nf; ++f) {
278               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
279               for (b = 0; b < Nb; ++b) {
280                 PetscInt idx = Idxs[b];
281                 if (idx >= 0) {
282                   coef[f * Nb + b] = xdata[idx + moffset];
283                 } else {
284                   idx              = -idx - 1;
285                   coef[f * Nb + b] = 0;
286                   for (q = 0; q < maps[grid].num_face; q++) {
287                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
288                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
289                     coef[f * Nb + b] += scale * xdata[id + moffset];
290                   }
291                 }
292               }
293             }
294           }
295           /* get f and df */
296           for (PetscInt qi = 0; qi < Nq; qi++) {
297             const PetscReal *invJ = &invJe[qi * dim * dim];
298             const PetscReal *Bq   = &BB[qi * Nb];
299             const PetscReal *Dq   = &DD[qi * Nb * dim];
300             PetscReal        u_x[LANDAU_DIM];
301             /* get f & df */
302             for (f = 0; f < loc_Nf; ++f) {
303               const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
304               PetscInt       b, e;
305               PetscReal      refSpaceDer[LANDAU_DIM];
306               ff[idx] = 0.0;
307               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
308               for (b = 0; b < Nb; ++b) {
309                 const PetscInt cidx = b;
310                 ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
311                 for (int d = 0; d < dim; ++d) { refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]); }
312               }
313               for (int d = 0; d < LANDAU_DIM; ++d) {
314                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) { u_x[d] += invJ[e * dim + d] * refSpaceDer[e]; }
315               }
316               dudx[idx] = u_x[0];
317               dudy[idx] = u_x[1];
318 #if LANDAU_DIM == 3
319               dudz[idx] = u_x[2];
320 #endif
321             }
322           } // q
323         }   // grid
324       }     // grid*batch
325       PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0));
326 #if defined(PETSC_HAVE_THREADSAFETY)
327       endtime = MPI_Wtime();
328       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
329 #endif
330     } // Jacobian setup
331     // assemble Jacobian (or mass)
332     for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
333       const PetscInt b_Nelem      = elem_offset[num_grids];
334       const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
335       PetscInt       grid = 0;
336 #if defined(PETSC_HAVE_THREADSAFETY)
337       double starttime, endtime;
338       starttime = MPI_Wtime();
339 #endif
340       while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
341       {
342         const PetscInt   loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
343         const PetscInt   moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
344         PetscScalar     *elemMat;
345         const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
346         PetscCall(PetscMalloc1(elemMatSize, &elemMat));
347         PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat)));
348         if (shift == 0.0) { // Jacobian
349           PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0));
350         } else { // mass
351           PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0));
352         }
353         for (PetscInt qj = 0; qj < Nq; ++qj) {
354           const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
355           PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
356           PetscInt       d, d2, dp, d3, IPf_idx;
357           if (shift == 0.0) { // Jacobian
358             const PetscReal *const invJj = &invJe[qj * dim * dim];
359             PetscReal              gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
360             const PetscReal        vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
361             // create g2 & g3
362             for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
363               gg2_temp[d] = 0;
364               for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
365             }
366             /* inner beta reduction */
367             IPf_idx = 0;
368             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
369               PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
370               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
371                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
372                   const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
373                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
374 #if LANDAU_DIM == 2
375                   PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
376                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
377 #else
378                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
379                   if (ctx->use_relativistic_corrections) {
380                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
381                   } else {
382                     LandauTensor3D(vj, x, y, z, U, mask);
383                   }
384 #endif
385                   for (int f = 0; f < Nfloc_r; ++f) {
386                     const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
387                     temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off];
388                     temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off];
389 #if LANDAU_DIM == 3
390                     temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off];
391 #endif
392                     temp2 += ff[idx] * nu_beta[f + f_off];
393                   }
394                   temp1[0] *= wi;
395                   temp1[1] *= wi;
396 #if LANDAU_DIM == 3
397                   temp1[2] *= wi;
398 #endif
399                   temp2 *= wi;
400 #if LANDAU_DIM == 2
401                   for (d2 = 0; d2 < 2; d2++) {
402                     for (d3 = 0; d3 < 2; ++d3) {
403                       /* K = U * grad(f): g2=e: i,A */
404                       gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
405                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
406                       gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
407                     }
408                   }
409 #else
410                   for (d2 = 0; d2 < 3; ++d2) {
411                     for (d3 = 0; d3 < 3; ++d3) {
412                       /* K = U * grad(f): g2 = e: i,A */
413                       gg2_temp[d2] += U[d2][d3] * temp1[d3];
414                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
415                       gg3_temp[d2][d3] += U[d2][d3] * temp2;
416                     }
417                   }
418 #endif
419                 } // qi
420               }   // ei_r
421               IPf_idx += nip_loc_r * Nfloc_r;
422             } /* grid_r - IPs */
423             PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb);
424             // add alpha and put in gg2/3
425             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
426               for (d2 = 0; d2 < LANDAU_DIM; d2++) {
427                 gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
428                 for (d3 = 0; d3 < LANDAU_DIM; d3++) { gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off]; }
429               }
430             }
431             /* add electric field term once per IP */
432             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) { gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off]; }
433             /* Jacobian transform - g2, g3 */
434             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
435               for (d = 0; d < dim; ++d) {
436                 g2[fieldA][d] = 0.0;
437                 for (d2 = 0; d2 < dim; ++d2) {
438                   g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
439                   g3[fieldA][d][d2] = 0.0;
440                   for (d3 = 0; d3 < dim; ++d3) {
441                     for (dp = 0; dp < dim; ++dp) { g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp]; }
442                   }
443                   g3[fieldA][d][d2] *= wj;
444                 }
445                 g2[fieldA][d] *= wj;
446               }
447             }
448           } else { // mass
449             PetscReal wj = ww[jpidx_glb];
450             /* Jacobian transform - g0 */
451             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
452               if (dim == 2) {
453                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
454               } else {
455                 g0[fieldA] = wj * shift; // move this to below and remove g0
456               }
457             }
458           }
459           /* FE matrix construction */
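          /* The loops below accumulate, for each field's diagonal block,
             elemMat(i,j) += grad(phi_i) . g2 phi_j + grad(phi_i) . g3 . grad(phi_j)  (Jacobian, shift == 0)
             elemMat(i,j) += phi_i g0 phi_j                                           (mass, shift != 0) */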
460           {
461             PetscInt         fieldA, d, f, d2, g;
462             const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
463             /* assemble - on the diagonal (I,I) */
464             for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
465               for (f = 0; f < Nb; f++) {
466                 const PetscInt i = fieldA * Nb + f; /* Element matrix row */
467                 for (g = 0; g < Nb; ++g) {
468                   const PetscInt j    = fieldA * Nb + g; /* Element matrix column */
469                   const PetscInt fOff = i * totDim + j;
470                   if (shift == 0.0) {
471                     for (d = 0; d < dim; ++d) {
472                       elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
473                       for (d2 = 0; d2 < dim; ++d2) { elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2]; }
474                     }
475                   } else { // mass
476                     elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
477                   }
478                 }
479               }
480             }
481           }
482         }                   /* qj loop */
483         if (shift == 0.0) { // Jacobian
484           PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0));
485         } else {
486           PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0));
487         }
488 #if defined(PETSC_HAVE_THREADSAFETY)
489         endtime = MPI_Wtime();
490         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
491 #endif
492         /* assemble matrix */
493         if (!container) {
494           PetscInt cStart;
495           PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0));
496           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
497           PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES));
498           PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0));
499         } else { // GPU like assembly for debugging
500           PetscInt    fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
501           PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
502           LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
503           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
504           for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
505             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
506             for (f = 0; f < Nb; f++) {
507               PetscInt idx = Idxs[f];
508               if (idx >= 0) {
509                 nr           = 1;
510                 rows0[0]     = idx;
511                 row_scale[0] = 1.;
512               } else {
513                 idx = -idx - 1;
514                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
515                   if (maps[grid].c_maps[idx][q].gid < 0) break;
516                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
517                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
518                 }
519               }
520               for (g = 0; g < Nb; ++g) {
521                 idx = Idxs[g];
522                 if (idx >= 0) {
523                   nc           = 1;
524                   cols0[0]     = idx;
525                   col_scale[0] = 1.;
526                 } else {
527                   idx = -idx - 1;
528                   nc  = maps[grid].num_face;
529                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
530                     if (maps[grid].c_maps[idx][q].gid < 0) break;
531                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
532                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
533                   }
534                 }
535                 const PetscInt    i   = fieldA * Nb + f; /* Element matrix row */
536                 const PetscInt    j   = fieldA * Nb + g; /* Element matrix column */
537                 const PetscScalar Aij = elemMat[i * totDim + j];
538                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
539                   const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
540                   const int idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
541                   for (int q = 0, idx2 = idx0; q < nr; q++) {
542                     for (int d = 0; d < nc; d++, idx2++) { coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij; }
543                   }
544                 } else {
545                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
546                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
547                   for (q = 0; q < nr; q++) {
548                     for (d = 0; d < nc; d++) { vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij; }
549                   }
550                   PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES));
551                 }
552               }
553             }
554           }
555         }
556         if (loc_elem == -1) {
557           PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n"));
558           for (int d = 0; d < totDim; ++d) {
559             for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f])));
560             PetscCall(PetscPrintf(ctx->comm, "\n"));
561           }
562           exit(12);
563         }
564         PetscCall(PetscFree(elemMat));
565       }                 /* grid */
566     }                   /* outer element & batch loop */
567     if (shift == 0.0) { // Jacobian: free the dynamic f/df data
568       PetscCall(PetscFree4(ff, dudx, dudy, dudz));
569     }
570     if (!container) {                                         // 'CPU' assembly move nest matrix to global JacP
571       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
572         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
573           const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
574           PetscInt           nloc, nzl, colbuf[1024], row;
575           const PetscInt    *cols;
576           const PetscScalar *vals;
577           Mat                B = subJ[LAND_PACK_IDX(b_id, grid)];
578           PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
579           PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
580           PetscCall(MatGetSize(B, &nloc, NULL));
581           for (int i = 0; i < nloc; i++) {
582             PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
583             PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
584             for (int j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
585             row = moffset + i;
586             PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
587             PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
588           }
589           PetscCall(MatDestroy(&B));
590         }
591       }
592     }
593     if (coo_vals) {
594       PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES));
595       PetscCall(PetscFree(coo_vals));
596     }
597   } /* CPU version */
598   PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
599   PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
600   /* clean up */
601   if (cellClosure) PetscCall(PetscFree(cellClosure));
602   if (xdata) { PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata)); }
603   PetscFunctionReturn(0);
604 }
605 
606 #if defined(LANDAU_ADD_BCS)
607 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[]) {
608   uexact[0] = 0;
609 }
610 #endif
611 
612 #define MATVEC2(__a, __x, __p) \
613   { \
614     int i, j; \
615     for (i = 0; i < 2; i++) { \
616       __p[i] = 0; \
617       for (j = 0; j < 2; j++) __p[i] += __a[i][j] * __x[j]; \
618     } \
619   }
620 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y, PetscReal *outX, PetscReal *outY) {
621   PetscReal rr = PetscSqrtReal(x * x + y * y), outfact, efact;
622   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
623     *outX = x;
624     *outY = y;
625   } else {
626     const PetscReal xy[2] = {x, y}, sinphi = y / rr, cosphi = x / rr;
627     PetscReal       cth, sth, xyprime[2], Rth[2][2], rotcos, newrr;
628     if (num_sections == 2) {
629       rotcos  = 0.70710678118654;
630       outfact = 1.5;
631       efact   = 2.5;
632       /* rotate normalized vector into [-pi/4,pi/4) */
633       if (sinphi >= 0.) { /* top cell, -pi/2 */
634         cth = 0.707106781186548;
635         sth = -0.707106781186548;
636       } else { /* bottom cell -pi/8 */
637         cth = 0.707106781186548;
638         sth = .707106781186548;
639       }
640     } else if (num_sections == 3) {
641       rotcos  = 0.86602540378443;
642       outfact = 1.5;
643       efact   = 2.5;
644       /* rotate normalized vector into [-pi/6,pi/6) */
645       if (sinphi >= 0.5) { /* top cell, -pi/3 */
646         cth = 0.5;
647         sth = -0.866025403784439;
648       } else if (sinphi >= -.5) { /* mid cell 0 */
649         cth = 1.;
650         sth = .0;
651       } else { /* bottom cell +pi/3 */
652         cth = 0.5;
653         sth = 0.866025403784439;
654       }
655     } else if (num_sections == 4) {
656       rotcos  = 0.9238795325112;
657       outfact = 1.5;
658       efact   = 3;
659       /* rotate normalized vector into [-pi/8,pi/8) */
660       if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
661         cth = 0.38268343236509;
662         sth = -0.923879532511287;
663       } else if (sinphi >= 0.) { /* mid top cell -pi/8 */
664         cth = 0.923879532511287;
665         sth = -.38268343236509;
666       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
667         cth = 0.923879532511287;
668         sth = 0.38268343236509;
669       } else { /* bottom cell + 3pi/8 */
670         cth = 0.38268343236509;
671         sth = .923879532511287;
672       }
673     } else {
674       cth    = 0.;
675       sth    = 0.;
676       rotcos = 0;
677       efact  = 0;
678     }
679     Rth[0][0] = cth;
680     Rth[0][1] = -sth;
681     Rth[1][0] = sth;
682     Rth[1][1] = cth;
683     MATVEC2(Rth, xy, xyprime);
684     if (num_sections == 2) {
685       newrr = xyprime[0] / rotcos;
686     } else {
687       PetscReal newcosphi = xyprime[0] / rr, rin = r1, rout = rr - rin;
688       PetscReal routmax = r0 * rotcos / newcosphi - rin, nroutmax = r0 - rin, routfrac = rout / routmax;
689       newrr = rin + routfrac * nroutmax;
690     }
691     *outX = cosphi * newrr;
692     *outY = sinphi * newrr;
693     /* grade */
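    /* power-law grading: a point at radius rr in [rs, re] is mapped to
       rs + ((rr - rs)/(re - rs))^fact * (re - rs), which for fact > 1 pulls points
       toward the inner radius rs of the zone */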
694     PetscReal fact, tt, rs, re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
695     if (rr > r2) {
696       rs   = r2;
697       re   = r0;
698       fact = outfact;
699     } /* outer zone */
700     else {
701       rs   = r1;
702       re   = r2;
703       fact = efact;
704     } /* electron zone */
705     tt = (rs + PetscPowReal((rr - rs) / (re - rs), fact) * (re - rs)) / rr;
706     *outX *= tt;
707     *outY *= tt;
708   }
709 }
710 
711 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx) {
712   LandauCtx *ctx = (LandauCtx *)a_ctx;
713   PetscReal  r = abc[0], z = abc[1];
714   if (ctx->inflate) {
715     PetscReal absR, absZ;
716     absR = PetscAbs(r);
717     absZ = PetscAbs(z);
718     CircleInflate(ctx->i_radius[0], ctx->e_radius, ctx->radius[0], ctx->num_sections, absR, absZ, &absR, &absZ); // wrong: how do I know what grid I am on?
719     r = (r > 0) ? absR : -absR;
720     z = (z > 0) ? absZ : -absZ;
721   }
722   xyz[0] = r;
723   xyz[1] = z;
724   if (dim == 3) xyz[2] = abc[2];
725 
726   PetscFunctionReturn(0);
727 }
728 
729 /* create DMComposite of meshes for each species group */
730 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack) {
731   PetscFunctionBegin;
732   { /* p4est, quads */
733     /* Create plex mesh of Landau domain */
734     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
735       PetscReal radius = ctx->radius[grid];
736       if (!ctx->sphere) {
737         PetscInt       cells[] = {2, 2, 2};
738         PetscReal      lo[] = {-radius, -radius, -radius}, hi[] = {radius, radius, radius};
739         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim == 2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
740         if (dim == 2) {
741           lo[0]                     = 0;
742           cells[0] /* = cells[1] */ = 1;
743         }
744         PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
745         PetscCall(DMLocalizeCoordinates(ctx->plex[grid]));                                                                     /* needed for periodic */
746         if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube"));
747         else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane"));
748       } else if (dim == 2) { // sphere is all wrong. should just have one inner radius
749         PetscInt   numCells, cells[16][4], i, j;
750         PetscInt   numVerts;
751         PetscReal  inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
752         PetscReal *flatCoords = NULL;
753         PetscInt  *flatCells  = NULL, *pcell;
754         if (ctx->num_sections == 2) {
755 #if 1
756           numCells        = 5;
757           numVerts        = 10;
758           int cells2[][4] = {
759             {0, 1, 4, 3},
760             {1, 2, 5, 4},
761             {3, 4, 7, 6},
762             {4, 5, 8, 7},
763             {6, 7, 8, 9}
764           };
765           for (i = 0; i < numCells; i++)
766             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
767           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
768           {
769             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
770             for (j = 0; j < numVerts - 1; j++) {
771               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 3) * PETSC_PI / 2;
772               PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
773               z             = rad * PetscSinReal(theta);
774               coords[j][1]  = z;
775               r             = rad * PetscCosReal(theta);
776               coords[j][0]  = r;
777             }
778             coords[numVerts - 1][0] = coords[numVerts - 1][1] = 0;
779           }
780 #else
781           numCells = 4;
782           numVerts = 8;
783           static int cells2[][4] = {
784             {0, 1, 2, 3},
785             {4, 5, 1, 0},
786             {5, 6, 2, 1},
787             {6, 7, 3, 2}
788           };
789           for (i = 0; i < numCells; i++)
790             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
791           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
792           {
793             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
794             PetscInt j;
795             for (j = 0; j < 8; j++) {
796               PetscReal z, r;
797               PetscReal theta = -PETSC_PI / 2 + (j % 4) * PETSC_PI / 3.;
798               PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
799               z = rad * PetscSinReal(theta);
800               coords[j][1] = z;
801               r = rad * PetscCosReal(theta);
802               coords[j][0] = r;
803             }
804           }
805 #endif
806         } else if (ctx->num_sections == 3) {
807           numCells        = 7;
808           numVerts        = 12;
809           int cells2[][4] = {
810             {0, 1, 5,  4 },
811             {1, 2, 6,  5 },
812             {2, 3, 7,  6 },
813             {4, 5, 9,  8 },
814             {5, 6, 10, 9 },
815             {6, 7, 11, 10},
816             {8, 9, 10, 11}
817           };
818           for (i = 0; i < numCells; i++)
819             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
820           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
821           {
822             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
823             for (j = 0; j < numVerts; j++) {
824               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 4) * PETSC_PI / 3;
825               PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
826               z             = rad * PetscSinReal(theta);
827               coords[j][1]  = z;
828               r             = rad * PetscCosReal(theta);
829               coords[j][0]  = r;
830             }
831           }
832         } else if (ctx->num_sections == 4) {
833           numCells        = 10;
834           numVerts        = 16;
835           int cells2[][4] = {
836             {0,  1,  6,  5 },
837             {1,  2,  7,  6 },
838             {2,  3,  8,  7 },
839             {3,  4,  9,  8 },
840             {5,  6,  11, 10},
841             {6,  7,  12, 11},
842             {7,  8,  13, 12},
843             {8,  9,  14, 13},
844             {10, 11, 12, 15},
845             {12, 13, 14, 15}
846           };
847           for (i = 0; i < numCells; i++)
848             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
849           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
850           {
851             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
852             for (j = 0; j < numVerts - 1; j++) {
853               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4;
854               PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
855               z             = rad * PetscSinReal(theta);
856               coords[j][1]  = z;
857               r             = rad * PetscCosReal(theta);
858               coords[j][0]  = r;
859             }
860             coords[numVerts - 1][0] = coords[numVerts - 1][1] = 0;
861           }
862         } else {
863           numCells = 0;
864           numVerts = 0;
865         }
866         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
867           pcell[0] = cells[j][0];
868           pcell[1] = cells[j][1];
869           pcell[2] = cells[j][2];
870           pcell[3] = cells[j][3];
871         }
872         PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, 4, ctx->interpolate, flatCells, 2, flatCoords, &ctx->plex[grid]));
873         PetscCall(PetscFree2(flatCoords, flatCells));
874         PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle"));
875       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support the cubed sphere");
876 
877       PetscCall(DMSetFromOptions(ctx->plex[grid]));
878     } // grid loop
879     PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack, prefix));
880 
881     { /* convert to p4est (or whatever), wait for discretization to create pack */
882       char      convType[256];
883       PetscBool flg;
884 
885       PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
886       PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg));
887       PetscOptionsEnd();
888       if (flg) {
889         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
890         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
891           DM dmforest;
892           PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest));
893           if (dmforest) {
894             PetscBool isForest;
895             PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest, prefix));
896             PetscCall(DMIsForest(dmforest, &isForest));
897             if (isForest) {
898               if (ctx->sphere && ctx->inflate) { PetscCall(DMForestSetBaseCoordinateMapping(dmforest, GeometryDMLandau, ctx)); }
899               PetscCall(DMDestroy(&ctx->plex[grid]));
900               ctx->plex[grid] = dmforest; // Forest for adaptivity
901             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
902           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
903         }
904       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
905     }
906   } /* non-file */
907   PetscCall(DMSetDimension(pack, dim));
908   PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh"));
909   PetscCall(DMSetApplicationContext(pack, ctx));
910 
911   PetscFunctionReturn(0);
912 }
913 
914 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx) {
915   PetscInt     ii, i0;
916   char         buf[256];
917   PetscSection section;
918 
919   PetscFunctionBegin;
920   for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
921     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
922     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
923     /* Setup Discretization - FEM */
924     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]));
925     PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf));
926     PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii]));
927   }
928   PetscCall(DMCreateDS(ctx->plex[grid]));
929   PetscCall(DMGetSection(ctx->plex[grid], &section));
930   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
931     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
932     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
933     PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
934   }
935   PetscFunctionReturn(0);
936 }
937 
938 /* Define a Maxwellian function for testing out the operator. */
939 
940 /* Using cartesian velocity space coordinates, the particle */
941 /* density, [1/m^3], is defined according to */
942 
943 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
944 
945 /* Using some constant, c, we normalize the velocity vector into a */
946 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
947 
948 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
949 
950 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
951 /* for finding the particle in a box dx^3 around x is */
952 
953 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
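/* In the MaxwellianCtx below, kT_m = kT/m and v_0 plays the role of the normalizing speed c, */
/* so maxwellian() evaluates theta = 2*kT_m/v_0^2, i.e. $\theta = 2T/(m v_0^2)$. */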
954 
955 typedef struct {
956   PetscReal v_0;
957   PetscReal kT_m;
958   PetscReal n;
959   PetscReal shift;
960 } MaxwellianCtx;
961 
962 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) {
963   MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
964   PetscInt       i;
965   PetscReal      v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0); /* theta = 2kT/mc^2 */
966   PetscFunctionBegin;
967   /* compute the exponents, v^2 */
968   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
969   /* evaluate the Maxwellian */
970   u[0] = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
971   if (mctx->shift != 0.) {
972     v2 = 0;
973     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
974     v2 += (x[dim - 1] - mctx->shift) * (x[dim - 1] - mctx->shift);
975     /* evaluate the shifted Maxwellian */
976     u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
977   }
978   PetscFunctionReturn(0);
979 }
980 
981 /*@
982  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
983 
984  Collective on X
985 
986  Input Parameters:
987  +   dm - The mesh (local)
988  .   time - Current time
989  .   temps - Temperatures of each species (global)
990  .   ns - Number density of each species (global)
991  .   grid - index of the current grid - just used for offset into temps and ns
992  .   b_id - batch index
993  .   n_batch - number of batches
994  -   actx - Landau context
995 
996  Output Parameter:
997  .   X  - The state (local to this grid)
998 
999  Level: beginner
1000 
1001  .keywords: mesh
1002  .seealso: `DMPlexLandauCreateVelocitySpace()`
1003  @*/
1004 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx) {
1005   LandauCtx *ctx = (LandauCtx *)actx;
1006   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
1007   PetscInt       dim;
1008   MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1009 
1010   PetscFunctionBegin;
1011   PetscCall(DMGetDimension(dm, &dim));
1012   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1013   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
1014     mctxs[i0]      = &data[i0];
1015     data[i0].v_0   = ctx->v_0;                                            // v_0 same for all grids
1016     data[i0].kT_m  = ctx->k * temps[ii] / ctx->masses[ii];                /* kT/m */
1017     data[i0].n     = ns[ii] * (1 + 0.1 * (double)b_id / (double)n_batch); // ramp density up 10% to mimic application, n[0] used for Connor-Hastie
1018     initu[i0]      = maxwellian;
1019     data[i0].shift = 0;
1020   }
1021   data[0].shift = ctx->electronShift;
1022   /* need to make ADD_ALL_VALUES work - TODO */
1023   PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X));
1024   PetscFunctionReturn(0);
1025 }
1026 
1027 /*
1028  LandauSetInitialCondition - Adds Maxwellians using the Landau context
1029 
1030  Collective on X
1031 
1032  Input Parameters:
1033  +   dm - The mesh
1034  .   grid - index of the current grid - just used for offset into temps and ns
1035  .   b_id - batch index
1036  .   n_batch - number of batches
1037  -   actx - Landau context with T and n
1038 
1039  Output Parameter:
1040  .   X  - The state
1041 
1042  Level: beginner
1043 
1044  .keywords: mesh
1045  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
1046  */
1047 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx) {
1048   LandauCtx *ctx = (LandauCtx *)actx;
1049   PetscFunctionBegin;
1050   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1051   PetscCall(VecZeroEntries(X));
1052   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
1053   PetscFunctionReturn(0);
1054 }
1055 
1056 // adapt a level once. Forest in/out
1057 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest) {
1058   DM              forest, plex, adaptedDM = NULL;
1059   PetscDS         prob;
1060   PetscBool       isForest;
1061   PetscQuadrature quad;
1062   PetscInt        Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1063   DMLabel         adaptLabel = NULL;
1064 
1065   PetscFunctionBegin;
1066   forest = ctx->plex[grid];
1067   PetscCall(DMCreateDS(forest));
1068   PetscCall(DMGetDS(forest, &prob));
1069   PetscCall(DMGetDimension(forest, &dim));
1070   PetscCall(DMIsForest(forest, &isForest));
1071   PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest");
1072   PetscCall(DMConvert(forest, DMPLEX, &plex));
1073   PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
1074   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel));
1075   PetscCall(PetscFEGetQuadrature(fem, &quad));
1076   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
1077   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
1078   PetscCall(PetscDSGetDimensions(prob, &Nb));
1079   if (type == 4) {
1080     for (c = cStart; c < cEnd; c++) { PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); }
1081     PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n", "adaptToleranceFEM"));
1082   } else if (type == 2) {
1083     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim == 3) ? 8 : 2;
1084     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1085     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1086     for (c = cStart; c < cEnd; c++) {
1087       PetscReal tt, v0[LANDAU_MAX_NQ * 3], detJ[LANDAU_MAX_NQ];
1088       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
1089       for (qj = 0; qj < Nq; ++qj) {
1090         tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr(((dim == 3) ? v0[dim * qj + 2] : 0));
1091         r  = PetscSqrtReal(tt);
1092         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
1093           minRad         = r;
1094           nr             = 0;
1095           rCellIdx[nr++] = c;
1096           PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", (double)r, c, qj + 1, Nq));
1097         } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
1098           for (k = 0; k < nr; k++)
1099             if (c == rCellIdx[k]) break;
1100           if (k == nr) {
1101             rCellIdx[nr++] = c;
1102             PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", (double)r, c, qj + 1, Nq, (double)(r - minRad)));
1103           }
1104         }
1105         if (ctx->sphere) {
1106           if ((tt = r - ctx->e_radius) > 0) {
1107             PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n", c, (double)tt));
1108             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON * 100.) {
1109               eMinRad             = tt;
1110               eMaxIdx             = 0;
1111               eCellIdx[eMaxIdx++] = c;
1112             } else if (eMaxIdx > 0 && (tt - eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx - 1]) {
1113               eCellIdx[eMaxIdx++] = c;
1114             }
1115           }
1116           if ((tt = r - ctx->i_radius[grid]) > 0) {
1117             if (tt < iMinRad - 1.e-5) {
1118               iMinRad             = tt;
1119               iMaxIdx             = 0;
1120               iCellIdx[iMaxIdx++] = c;
1121             } else if (iMaxIdx > 0 && (tt - iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx - 1]) {
1122               iCellIdx[iMaxIdx++] = c;
1123             }
1124           }
1125         }
1126       }
1127     }
1128     for (k = 0; k < nr; k++) { PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE)); }
1129     if (ctx->sphere) {
1130       for (c = 0; c < eMaxIdx; c++) {
1131         PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE));
1132         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", eCellIdx[c], (double)eMinRad));
1133       }
1134       for (c = 0; c < iMaxIdx; c++) {
1135         PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE));
1136         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", iCellIdx[c], (double)iMinRad));
1137       }
1138     }
1139     PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", rCellIdx[0], rCellIdx[1], (double)minRad));
1140   } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
1141     PetscScalar *coef = NULL;
1142     Vec          coords;
1143     PetscInt     csize, Nv, d, nz;
1144     DM           cdm;
1145     PetscSection cs;
1146     PetscCall(DMGetCoordinatesLocal(forest, &coords));
1147     PetscCall(DMGetCoordinateDM(forest, &cdm));
1148     PetscCall(DMGetLocalSection(cdm, &cs));
1149     for (c = cStart; c < cEnd; c++) {
1150       PetscInt doit = 0, outside = 0;
1151       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
1152       Nv = csize / dim;
1153       for (nz = d = 0; d < Nv; d++) {
1154         PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
1155         x = PetscSqrtReal(x);
1156         if (x < PETSC_MACHINE_EPSILON * 10. && PetscAbs(z) < PETSC_MACHINE_EPSILON * 10.) doit = 1;                              /* refine origin */
1157         else if (type == 0 && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
1158         else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++;                                  /* don't refine outside electron refine radius */
1159         else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++;                                  /* don't refine outside ion refine radius */
1160         if (x < PETSC_MACHINE_EPSILON * 10.) nz++;
1161       }
1162       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
1163       if (doit || (outside < Nv && nz)) { PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); }
1164     }
1165     PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n", "adaptToleranceFEM"));
1166   }
1167   PetscCall(DMDestroy(&plex));
1168   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
1169   PetscCall(DMLabelDestroy(&adaptLabel));
1170   *newForest = adaptedDM;
1171   if (adaptedDM) {
1172     if (isForest) {
1173       PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // detach from the pre-adaptation forest
1174     } else exit(33);                                           // should be unreachable: isForest was checked above
1175     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
1176     PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
1177     PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", cEnd - cStart, Nq * (cEnd - cStart)));
1178     PetscCall(DMDestroy(&plex));
1179   } else *newForest = NULL;
1180   PetscFunctionReturn(0);
1181 }
1182 
1183 // forest goes in (ctx->plex[grid]), plex comes out
1184 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu) {
1185   PetscInt adaptIter;
1186 
1187   PetscFunctionBegin;
1188   PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
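  /* limits[] pairs with the 'type' argument of adaptToleranceFEM: {numRERefine (type 0), nZRefine1 (type 1),
     numAMRRefine (type 2), nZRefine2 (type 3), postAMRRefine (type 4)}; the RE and first z refinements are
     only applied on grid 0 */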
1189   for (type = 0; type < 5; type++) {
1190     for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
1191       DM newForest = NULL;
1192       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
1193       if (newForest) {
1194         PetscCall(DMDestroy(&ctx->plex[grid]));
1195         PetscCall(VecDestroy(uu));
1196         PetscCall(DMCreateGlobalVector(newForest, uu));
1197         PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR"));
1198         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
1199         ctx->plex[grid] = newForest;
1200       } else {
1201         exit(4); // can happen with no AMR and post refinement
1202       }
1203     }
1204   }
1205   PetscFunctionReturn(0);
1206 }
1207 
1208 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[]) {
1209   PetscBool flg, sph_flg;
1210   PetscInt  ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS];
1211   PetscReal v0_grid[LANDAU_MAX_GRIDS];
1212   DM        dummy;
1213 
1214   PetscFunctionBegin;
1215   PetscCall(DMCreate(ctx->comm, &dummy));
1216   /* get options - initialize context */
1217   ctx->verbose = 1; // should be 0 to comply with the silent-by-default convention
1218 #if defined(PETSC_HAVE_THREADSAFETY)
1219   ctx->batch_sz = PetscNumOMPThreads;
1220 #else
1221   ctx->batch_sz = 1;
1222 #endif
1223   ctx->batch_view_idx = 0;
1224   ctx->interpolate    = PETSC_TRUE;
1225   ctx->gpu_assembly   = PETSC_TRUE;
1226   ctx->norm_state     = 0;
1227   ctx->electronShift  = 0;
1228   ctx->M              = NULL;
1229   ctx->J              = NULL;
1230   /* geometry and grids */
1231   ctx->sphere         = PETSC_FALSE;
1232   ctx->inflate        = PETSC_FALSE;
1233   ctx->use_p4est      = PETSC_FALSE;
1234   ctx->num_sections   = 3; /* 2, 3 or 4 */
1235   for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1236     ctx->radius[grid]             = 5.; /* thermal radius (velocity) */
1237     ctx->numAMRRefine[grid]       = 5;
1238     ctx->postAMRRefine[grid]      = 0;
1239     ctx->species_offset[grid + 1] = 1; // one species default
1240     num_species_grid[grid]        = 0;
1241     ctx->plex[grid]               = NULL; /* cache as expensive to Convert */
1242   }
1243   ctx->species_offset[0] = 0;
1244   ctx->re_radius         = 0.;
1245   ctx->vperp0_radius1    = 0;
1246   ctx->vperp0_radius2    = 0;
1247   ctx->nZRefine1         = 0;
1248   ctx->nZRefine2         = 0;
1249   ctx->numRERefine       = 0;
1250   num_species_grid[0]    = 1; // one species default
1251   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1252   ctx->charges[0]        = -1;                       /* electron charge in units of proton charge; scaled to MKS below */
1253   ctx->masses[0]         = 1 / 1835.469965278441013; /* temporary value in proton mass units */
1254   ctx->n[0]              = 1;
1255   ctx->v_0               = 1; /* thermal velocity, we could start with a scale != 1 */
1256   ctx->thermal_temps[0]  = 1;
1257   /* constants, etc. */
1258   ctx->epsilon0          = 8.8542e-12;     /* permittivity of free space (MKS) F/m */
1259   ctx->k                 = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1260   ctx->lnLam             = 10;             /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
1261   ctx->n_0               = 1.e20;          /* typical plasma n, but could set it to 1 */
1262   ctx->Ez                = 0;
1263   for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
1264   ctx->use_matrix_mass                = PETSC_FALSE;
1265   ctx->use_relativistic_corrections   = PETSC_FALSE;
1266   ctx->use_energy_tensor_trick        = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1267   ctx->SData_d.w                      = NULL;
1268   ctx->SData_d.x                      = NULL;
1269   ctx->SData_d.y                      = NULL;
1270   ctx->SData_d.z                      = NULL;
1271   ctx->SData_d.invJ                   = NULL;
1272   ctx->jacobian_field_major_order     = PETSC_FALSE;
1273   ctx->SData_d.coo_elem_offsets       = NULL;
1274   ctx->SData_d.coo_elem_point_offsets = NULL;
1275   ctx->coo_assembly                   = PETSC_FALSE;
1276   ctx->SData_d.coo_elem_fullNb        = NULL;
1277   ctx->SData_d.coo_size               = 0;
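  /* A hypothetical example of the options processed below (values are illustrative only):
       -dm_landau_thermal_temps 2,1 -dm_landau_n 1,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
       -dm_landau_num_species_grid 1,1 -dm_landau_amr_levels_max 2,2 -dm_landau_device_type cpu
     The number of species is taken from the length of the -dm_landau_thermal_temps list. */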
1278   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1279   {
1280     char opstring[256];
1281 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1282     ctx->deviceType = LANDAU_KOKKOS;
1283     PetscCall(PetscStrcpy(opstring, "kokkos"));
1284 #elif defined(PETSC_HAVE_CUDA)
1285     ctx->deviceType = LANDAU_CUDA;
1286     PetscCall(PetscStrcpy(opstring, "cuda"));
1287 #else
1288     ctx->deviceType = LANDAU_CPU;
1289     PetscCall(PetscStrcpy(opstring, "cpu"));
1290 #endif
1291     PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu', 'cuda', or 'kokkos'", "plexland.c", opstring, opstring, sizeof(opstring), NULL));
1292     PetscCall(PetscStrcmp("cpu", opstring, &flg));
1293     if (flg) {
1294       ctx->deviceType = LANDAU_CPU;
1295     } else {
1296       PetscCall(PetscStrcmp("cuda", opstring, &flg));
1297       if (flg) {
1298         ctx->deviceType = LANDAU_CUDA;
1299       } else {
1300         PetscCall(PetscStrcmp("kokkos", opstring, &flg));
1301         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1302         else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", opstring);
1303       }
1304     }
1305   }
1306   PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL));
1307   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1308   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1309   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT, (PetscInt)LANDAU_MAX_BATCH_SZ, ctx->batch_sz);
1310   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1311   PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz);
1312   PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in unites of Conner-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL));
1313   PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL));
1314   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Cross section parameter", "plexland.c", ctx->lnLam, &ctx->lnLam, NULL));
1315   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1316   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1317   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
1318                              &ctx->use_energy_tensor_trick, NULL));
1319 
1320   /* get num species with temperature, set defaults */
1321   for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
1322     ctx->thermal_temps[ii] = 1;
1323     ctx->charges[ii]       = 1;
1324     ctx->masses[ii]        = 1;
1325     ctx->n[ii]             = 1;
1326   }
1327   nt = LANDAU_MAX_SPECIES;
1328   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1329   if (flg) {
1330     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt));
1331     ctx->num_species = nt;
1332   } else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps ,t1,t2,.. must be provided to set the number of species");
1333   for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert keV to Kelvin (1 keV = 1.1604525e7 K) */
1334   nm = LANDAU_MAX_SPECIES - 1;
1335   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1336   PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1);
1337   nm = LANDAU_MAX_SPECIES;
1338   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1339   PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species);
1340   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1341   ctx->masses[0] = 9.10938356e-31;                                           /* electron mass kg (should be about right already) */
1342   ctx->m_0       = ctx->masses[0];                                           /* arbitrary reference mass, electrons */
1343   nc             = LANDAU_MAX_SPECIES - 1;
1344   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1345   if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1);
1346   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1347   /* geometry and grids */
1348   nt = LANDAU_MAX_GRIDS;
1349   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg));
1350   if (flg) {
1351     ctx->num_grids = nt;
1352     for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii];
1353     PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species,
1354                ctx->num_grids, LANDAU_MAX_GRIDS);
1355   } else {
1356     ctx->num_grids      = 1; // go back to a single grid run
1357     num_species_grid[0] = ctx->num_species;
1358   }
1359   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii];
1360   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????", ctx->species_offset[ctx->num_grids],
1361              ctx->num_species);
1362   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1363     int iii       = ctx->species_offset[grid];                                          // normalize with first (arbitrary) species on grid
1364     v0_grid[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1365   }
1366   ii = 0;
1367   PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL));
1368   ctx->v_0 = v0_grid[ii];                                                                                                                       /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1369   ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[0])) / ctx->lnLam / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[0,0]=1 */
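  /* written out: t_0 = 8 pi (epsilon_0 m_0 / e^2)^2 v_0^3 / (ln(Lambda) n_0); with this choice the electron
     self-collision rate nu[0,0] is 1 in code units, as noted above */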
1370   /* domain */
1371   nt       = LANDAU_MAX_GRIDS;
1372   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg));
1373   if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1374   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1375     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1376       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1377       else ctx->radius[grid] = -ctx->radius[grid];
1378       ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1379       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid));
1380     }
1381     ctx->radius[grid] *= v0_grid[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0
1382   }
1383   /* AMR parameters */
1384   nt = LANDAU_MAX_GRIDS;
1385   PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1386   PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT, nt, ctx->num_grids);
1387   nt = LANDAU_MAX_GRIDS;
1388   PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1389   for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1390   PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1391   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1392   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1393   PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg));
1394   PetscCall(PetscOptionsReal("-dm_landau_z_radius1", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg));
1395   PetscCall(PetscOptionsReal("-dm_landau_z_radius2", "velocity range to refine r=0 axis (for ions) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1396   /* spherical domain (not used) */
1397   PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential section in (2D) grid, 2, 3, of 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL));
1398   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg));
1399   PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg));
1400   PetscCall(PetscOptionsReal("-dm_landau_e_radius", "Electron thermal velocity, used for circular meshes", "plexland.c", ctx->e_radius, &ctx->e_radius, &flg));
1401   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an e radius was given without -dm_landau_sphere; assume a sphere was intended */
1402   if (!flg) { ctx->e_radius = 1.5 * PetscSqrtReal(8 * ctx->k * ctx->thermal_temps[0] / ctx->masses[0] / PETSC_PI) / ctx->v_0; }
1403   nt = LANDAU_MAX_GRIDS;
1404   PetscCall(PetscOptionsRealArray("-dm_landau_i_radius", "Ion thermal velocity, used for circular meshes", "plexland.c", ctx->i_radius, &nt, &flg));
1405   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1406   if (!flg) {
1407     ctx->i_radius[0] = 1.5 * PetscSqrtReal(8 * ctx->k * ctx->thermal_temps[1] / ctx->masses[1] / PETSC_PI) / ctx->v_0; // need to correct for ion grid domain
1408   }
1409   if (flg) PetscCheck(ctx->num_grids == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_i_radius: %" PetscInt_FMT " != num_grids = %" PetscInt_FMT, nt, ctx->num_grids);
1410   if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0], ctx->comm, PETSC_ERR_ARG_WRONG, "bad radii: %g < %g < %g", (double)ctx->i_radius[0], (double)ctx->e_radius, (double)ctx->radius[0]);
1411   /* processing options */
1412   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1413   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is only offered for the CPU and Kokkos back-ends
1414     PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL));
1415     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d", ctx->coo_assembly);
1416   }
1417   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1418   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1419   PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1420   PetscOptionsEnd();
1421 
1422   for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1423   if (ctx->verbose > 0) {
1424     PetscCall(PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / 1.6720e-27), (double)(ctx->num_species > 2 ? ctx->masses[2] / 1.6720e-27 : 0)));
1425     PetscCall(PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0)));
1426     PetscCall(PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0)));
1427     PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", (double)ctx->thermal_temps[0], (double)ctx->thermal_temps[1],
1428                           (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",
1429                           ctx->batch_sz));
1430     PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %d: %10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius[0], ctx->numAMRRefine[0]));
1431     for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius[ii], ctx->numAMRRefine[ii]));
1432     PetscCall(PetscPrintf(ctx->comm, "\n"));
1433     if (ctx->jacobian_field_major_order) {
1434       PetscCall(PetscPrintf(ctx->comm, "Using field major order for GPU Jacobian\n"));
1435     } else {
1436       PetscCall(PetscPrintf(ctx->comm, "Using default Plex order for all matrices\n"));
1437     }
1438   }
1439   PetscCall(DMDestroy(&dummy));
1440   {
1441     PetscMPIInt rank;
1442     PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank));
1443     ctx->stage = 0;
1444     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]));   /* 13 */
1445     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]));  /* 2 */
1446     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]));   /* 12 */
1447     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]));  /* 15 */
1448     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1449     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1450     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]));  /* 0 */
1451     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]));      /* 9 */
1452     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]));       /* 10 */
1453     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]));  /* 7 */
1454     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]));  /* 1 */
1455     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]));     /* 3 */
1456     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]));  /* 8 */
1457     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]));  /* 4 */
1458     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1459     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]));     /* 5 */
1460     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]));    /* 6 */
1461 
1462     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1463       PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason"));
1464       PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
1465       PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor"));
1466       PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor"));
1467       PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor"));
1468       PetscCall(PetscOptionsClearValue(NULL, "-ts_view"));
1469       PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor"));
1470       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view"));
1471       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view"));
1472       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view"));
1473       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view"));
1474       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view"));
1475       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view"));
1476       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason"));
1477       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor"));
1478       PetscCall(PetscOptionsClearValue(NULL, "-"));
1479       PetscCall(PetscOptionsClearValue(NULL, "-info"));
1480     }
1481   }
1482   PetscFunctionReturn(0);
1483 }
1484 
1485 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx) {
1486   PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
1487   PetscQuadrature  quad;
1488   const PetscReal *quadWeights;
1489   PetscInt         numCells[LANDAU_MAX_GRIDS], Nq, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES;
1490   PetscTabulation *Tf;
1491   PetscDS          prob;
1492 
1493   PetscFunctionBegin;
1494   PetscCall(DMGetDS(ctx->plex[0], &prob));    // same DS for all grids
1495   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1496   /* DS, Tab and quad is same on all grids */
1497   PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1498   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1499   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights));
1500   PetscCheck(Nq <= LANDAU_MAX_NQ, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)", Nq, LANDAU_MAX_NQ);
1501   /* setup each grid */
1502   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1503     PetscInt cStart, cEnd;
1504     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1505     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1506     numCells[grid] = cEnd - cStart; // grids can have different topology
1507     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1508     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1509     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1510     ncellsTot += numCells[grid];
1511   }
1512   /* create GPU assembly data */
1513   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1514     PetscContainer container;
1515     PetscScalar   *elemMatrix, *elMat;
1516     pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE];
1517     P4estVertexMaps *maps;
1518     const PetscInt  *plex_batch = NULL, Nb = Nq, elMatSz = Nq * Nq * ctx->num_species * ctx->num_species; // tensor elements;
1519     LandauIdx       *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = NULL;
1520     /* create GPU assembly data */
1521     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1));
1522     PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0));
1523     PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps));
1524     PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps));
1525     PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix));
1526 
1527     if (ctx->coo_assembly) {                                                                                                      // setup COO assembly -- put COO metadata directly in ctx->SData_d
1528       PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1529       coo_elem_offsets[0] = 0;                                                                                                    // finish later
1530       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot));
1531       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1532       ctx->SData_d.coo_elem_offsets       = (void *)coo_elem_offsets;
1533       ctx->SData_d.coo_elem_fullNb        = (void *)coo_elem_fullNb;
1534       ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets;
1535     } else {
1536       ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL;
1537       ctx->SData_d.coo_elem_point_offsets                          = NULL;
1538       ctx->SData_d.coo_n_cellsTot                                  = 0;
1539     }
1540 
1541     ctx->SData_d.coo_max_fullnb = 0;
1542     for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1543       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nq;
1544       if (grid_batch_is_inv[grid]) { PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch)); }
1545       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1546       // make maps
1547       maps[grid].d_self       = NULL;
1548       maps[grid].num_elements = numCells[grid];
1549       maps[grid].num_face     = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001);                 // Q
1550       maps[grid].num_face     = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^2
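      // e.g., Nq = 9, dim = 2 gives Q = 3 and num_face = 3; Nq = 27, dim = 3 gives Q = 3 and num_face = 9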
1551       maps[grid].num_reduced  = 0;
1552       maps[grid].deviceType   = ctx->deviceType;
1553       maps[grid].numgrids     = ctx->num_grids;
1554       // count reduced and get
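      /*
       The loop below recovers the local-to-global map one basis function at a time: an element matrix with a
       single 1 on the (fieldA, q) diagonal is pushed through DMPlexGetClosureIndices(), and the diagonal slot
       where the 1 lands identifies the global index (indices[f]). A diagonal value different from 1 marks a
       constrained (hanging) point; its row of interpolation weights is stored in pointMaps[] and the point gets
       a negative encoded index into that table.
      */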
1555       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1556       for (int ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1557         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1558         for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1559           int fullNb = 0;
1560           for (int q = 0; q < Nb; ++q) {
1561             PetscInt     numindices, *indices;
1562             PetscScalar *valuesOrig = elMat = elemMatrix;
1563             PetscCall(PetscArrayzero(elMat, totDim * totDim));
1564             elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1;
1565             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1566             for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal
1567               if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) {
1568                 // found it
1569                 if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1570                   if (plex_batch) {
1571                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)plex_batch[indices[f]];
1572                   } else {
1573                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1574                   }
1575                   fullNb++;
1576                 } else { // found a constraint
1577                   int            jj                = 0;
1578                   PetscReal      sum               = 0;
1579                   const PetscInt ff                = f;
1580                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
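                  // e.g., the first constrained point (num_reduced = 0) is stored as -1 and decoded back to index 0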
1581 
1582                   do {                                                                                              // constraints are continuous in Plex - exploit that here
1583                     int ii;                                                                                         // get 'scale'
1584                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1585                       if (ff + ii < numindices) {                                                                   // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1586                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]);
1587                       }
1588                     }
1589                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1590                     // get 'gid'
1591                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1592                     else {
1593                       if (plex_batch) {
1594                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1595                       } else {
1596                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1597                       }
1598                       fullNb++;
1599                     }
1600                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1601                   while (jj < maps[grid].num_face) {
1602                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1603                     pointMaps[maps[grid].num_reduced][jj].gid   = -1;
1604                     jj++;
1605                   }
1606                   if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug
1607                     int       d, f;
1608                     PetscReal tmp = 0;
1609                     PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face));
1610                     for (d = 0, tmp = 0; d < numindices; ++d) {
1611                       if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3d) %3" PetscInt_FMT ": ", d, indices[d]));
1612                       for (f = 0; f < numindices; ++f) { tmp += PetscRealPart(elMat[d * numindices + f]); }
1613                       if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp));
1614                     }
1615                   }
1616                   maps[grid].num_reduced++;
1617                   PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE);
1618                 }
1619                 break;
1620               }
1621             }
1622             // cleanup
1623             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat));
1624             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat));
1625           }
1626           if (ctx->coo_assembly) {                                 // setup COO assembly
1627             coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid
1628             if (fieldA == 0) {                                     // cache full Nb for this element, on this grid per species
1629               coo_elem_fullNb[glb_elem_idx] = fullNb;
1630               if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1631             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d", coo_elem_fullNb[glb_elem_idx], fullNb);
1632           }
1633         } // field
1634       }   // cell
1635       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1636       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1637       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1638         for (int q = 0; q < maps[grid].num_face; ++q) {
1639           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1640           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1641         }
1642       }
1643 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1644       if (ctx->deviceType == LANDAU_KOKKOS) {
1645         PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // implies Kokkos does
1646       }                                                                      // else could be CUDA
1647 #endif
1648 #if defined(PETSC_HAVE_CUDA)
1649       if (ctx->deviceType == LANDAU_CUDA) { PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid)); }
1650 #endif
1651       if (plex_batch) {
1652         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1653         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1654       }
1655     } /* grids */
1656     // finish COO
1657     if (ctx->coo_assembly) { // setup COO assembly
1658       PetscInt *oor, *ooc;
1659       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz;
1660       PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc));
1661       for (int i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1;
1662       // get
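      /* Fill coo_elem_point_offsets[e][f]: a running count of the global dofs reached by the first f points of
         element e -- one dof for an unconstrained point, one per face point with a valid gid for a constrained
         one -- so coo_elem_point_offsets[e][Nb] == coo_elem_fullNb[e]. */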
1663       for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1664         for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1665           const int              fullNb           = coo_elem_fullNb[glb_elem_idx];
1666           const LandauIdx *const Idxs             = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should be the same but this is just for COO storage
1667           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1668           for (int f = 0, cnt2 = 0; f < Nb; f++) {
1669             int idx                                     = Idxs[f];
1670             coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1671             if (idx >= 0) {
1672               cnt2++;
1673               coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1674             } else {
1675               idx = -idx - 1;
1676               for (int q = 0; q < maps[grid].num_face; q++) {
1677                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1678                 cnt2++;
1679                 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1680               }
1681             }
1682             PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d", fullNb, cnt2);
1683           }
1684           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d", coo_elem_point_offsets[glb_elem_idx][Nb], fullNb);
1685         }
1686       }
1687       // set
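      /* Fill the COO row/column index arrays (oor/ooc). For each batch and each element the entries are laid out
         as one fullNb x fullNb block per species (fieldA); within a block the (f, g) point pair occupies an
         nr x nc sub-block starting at fullNb * point_offset[f] + nr * point_offset[g]. moffset shifts the indices
         to the (batch, grid) block of the global matrix. */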
1688       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1689         for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1690           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1691           for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1692             const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
1693             // set (i,j)
1694             for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1695               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1696               int                    rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
1697               for (int f = 0; f < Nb; ++f) {
1698                 const int nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f];
1699                 if (nr == 1) rows[0] = Idxs[f];
1700                 else {
1701                   const int idx = -Idxs[f] - 1;
1702                   for (int q = 0; q < nr; q++) { rows[q] = maps[grid].c_maps[idx][q].gid; }
1703                 }
1704                 for (int g = 0; g < Nb; ++g) {
1705                   const int nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g];
1706                   if (nc == 1) cols[0] = Idxs[g];
1707                   else {
1708                     const int idx = -Idxs[g] - 1;
1709                     for (int q = 0; q < nc; q++) { cols[q] = maps[grid].c_maps[idx][q].gid; }
1710                   }
1711                   const int idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1712                   for (int q = 0, idx = idx0; q < nr; q++) {
1713                     for (int d = 0; d < nc; d++, idx++) {
1714                       oor[idx] = rows[q] + moffset;
1715                       ooc[idx] = cols[d] + moffset;
1716                     }
1717                   }
1718                 }
1719               }
1720             }
1721           } // cell
1722         }   // grid
1723       }     // batch
1724       PetscCall(MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc));
1725       PetscCall(PetscFree2(oor, ooc));
1726     }
1727     PetscCall(PetscFree(pointMaps));
1728     PetscCall(PetscFree(elemMatrix));
1729     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1730     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1731     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1732     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container));
1733     PetscCall(PetscContainerDestroy(&container));
1734     PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0));
1735   } // end GPU assembly
1736   { /* create static point data, Jacobian called first, only one vertex copy */
1737     PetscReal     *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a;
1738     PetscInt       outer_ipidx, outer_ej, grid, nip_glb = 0;
1739     PetscFE        fe;
1740     const PetscInt Nb = Nq;
1741     PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0));
1742     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1743     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid];
1744     /* collect f data, first time is for Jacobian, but make mass now */
1745     if (ctx->verbose > 0) {
1746       PetscInt ncells = 0, N;
1747       PetscCall(MatGetSize(ctx->J, &N, NULL));
1748       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid];
1749       PetscCall(PetscPrintf(ctx->comm, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb,
1750                             ctx->num_species, Nb, dim, N));
1751     }
1752     PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a));
1753     if (dim == 3) { PetscCall(PetscMalloc1(nip_glb, &zz)); }
1754     if (ctx->use_energy_tensor_trick) {
1755       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
1756       PetscCall(PetscObjectSetName((PetscObject)fe, "energy"));
1757     }
1758     /* init each grids static data - no batch */
1759     for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once)
1760       Vec          v2_2 = NULL;                                                    // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1761       PetscSection e_section;
1762       DM           dmEnergy;
1763       PetscInt     cStart, cEnd, ej;
1764 
1765       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1766       // prep energy trick, get v^2 / 2 vector
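      /* energy tensor trick: project v^2/2 (or a gamma-based energy when relativistic corrections are on) onto
         the FE space and use its gradient, rather than the raw velocity coordinate, as the argument of the
         Landau tensor below; per the -dm_landau_use_energy_tensor_trick help text this conserves energy with
         relativistic corrections and Q1 elements */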
1767       if (ctx->use_energy_tensor_trick) {
1768         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1769         Vec        glob_v2;
1770         PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1771 
1772         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1773         PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy"));
1774         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1775         PetscCall(DMCreateDS(dmEnergy));
1776         PetscCall(DMGetSection(dmEnergy, &e_section));
1777         PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2));
1778         PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick"));
1779         c2_0[0] = &data[0];
1780         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2));
1781         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1782         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1783         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1784         PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1785         PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view"));
1786         PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view"));
1787         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1788       }
1789       /* append part of the IP data for each grid */
1790       for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) {
1791         PetscScalar *coefs = NULL;
1792         PetscReal    vj[LANDAU_MAX_NQ * LANDAU_DIM], detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1793         invJe = invJ_a + outer_ej * Nq * dim * dim;
1794         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj));
1795         if (ctx->use_energy_tensor_trick) { PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); }
1796         /* create static point data */
1797         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1798           const PetscInt   gidx = outer_ipidx;
1799           const PetscReal *invJ = &invJe[qj * dim * dim];
1800           ww[gidx]              = detJj[qj] * quadWeights[qj];
1801           if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1802           // get xx, yy, zz
1803           if (ctx->use_energy_tensor_trick) {
1804             double                 refSpaceDer[3], eGradPhi[3];
1805             const PetscReal *const DD = Tf[0]->T[1];
1806             const PetscReal       *Dq = &DD[qj * Nb * dim];
1807             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1808             for (int b = 0; b < Nb; ++b) {
1809               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]);
1810             }
1811             xx[gidx] = 1e10;
1812             if (ctx->use_relativistic_corrections) {
1813               double dg2_c2 = 0;
1814               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1815               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1816               dg2_c2 *= (double)c02;
1817               if (dg2_c2 >= .999) {
1818                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1819                 yy[gidx] = vj[qj * dim + 1];
1820                 if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1821                 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + ((dim == 3) ? zz[gidx] * zz[gidx] : 0)), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)((dim == 3) ? zz[gidx] : 0))); // guard zz, which is NULL in 2D
1822               } else {
1823                 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2);
1824                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1825                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1826               }
1827             }
1828             if (xx[gidx] == 1e10) {
1829               for (int d = 0; d < dim; ++d) {
1830                 for (int e = 0; e < dim; ++e) { eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e]; }
1831               }
1832               xx[gidx] = eGradPhi[0];
1833               yy[gidx] = eGradPhi[1];
1834               if (dim == 3) zz[gidx] = eGradPhi[2];
1835             }
1836           } else {
1837             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1838             yy[gidx] = vj[qj * dim + 1];
1839             if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1840           }
1841         } /* q */
1842         if (ctx->use_energy_tensor_trick) { PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); }
1843       } /* ej */
1844       if (ctx->use_energy_tensor_trick) {
1845         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1846         PetscCall(DMDestroy(&dmEnergy));
1847       }
1848     } /* grid */
1849     if (ctx->use_energy_tensor_trick) { PetscCall(PetscFEDestroy(&fe)); }
1850     /* cache static data */
1851     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1852 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1853       PetscReal invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1854       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1855         for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) {
1856           invMass[ii]  = ctx->m_0 / ctx->masses[ii];
1857           nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii];
1858           nu_beta[ii]  = PetscSqr(ctx->charges[ii] / ctx->epsilon0) * ctx->lnLam / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
1859         }
1860       }
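      /* sanity check (a derivation, not from the original source): with the t_0 chosen in ProcessOptions,
         nu_alpha[a] * nu_beta[b] = (q_a q_b / e^2)^2 * (m_0 / m_a), which is 1 for electron-electron (a = b = 0),
         matching the "nu[0,0]=1" normalization noted there */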
1861       if (ctx->deviceType == LANDAU_CUDA) {
1862 #if defined(PETSC_HAVE_CUDA)
1863         PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1864 #else
1865         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type cuda not built");
1866 #endif
1867       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1868 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1869         PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1870 #else
1871         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built");
1872 #endif
1873       }
1874 #endif
1875       /* free */
1876       PetscCall(PetscFree4(ww, xx, yy, invJ_a));
1877       if (dim == 3) PetscCall(PetscFree(zz));
1878     } else { /* CPU version, just copy in, only use part */
1879       ctx->SData_d.w    = (void *)ww;
1880       ctx->SData_d.x    = (void *)xx;
1881       ctx->SData_d.y    = (void *)yy;
1882       ctx->SData_d.z    = (void *)zz;
1883       ctx->SData_d.invJ = (void *)invJ_a;
1884     }
1885     PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0));
1886   } // initialize
1887   PetscFunctionReturn(0);
1888 }
1889 
1890 /* < v, u > */
1891 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) {
1892   g0[0] = 1.;
1893 }
1894 
1895 /* fake < v, u > with distinct nonzero values; used only to build a representative sparsity pattern for matrix reordering */
1896 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) {
1897   static double ttt = 1e-12;
1898   g0[0]             = ttt++;
1899 }
1900 
1901 /* < v, 2 pi r u > (axisymmetric weight) */
1902 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) {
1903   g0[0] = 2. * PETSC_PI * x[0];
1904 }
1905 
1906 static PetscErrorCode MatrixNfDestroy(void *ptr) {
1907   PetscInt *nf = (PetscInt *)ptr;
1908   PetscFunctionBegin;
1909   PetscCall(PetscFree(nf));
1910   PetscFunctionReturn(0);
1911 }
1912 
1913 /*
1914  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep sparse.
1915   - Like DMPlexLandauCreateMassMatrix; the two should be combined into one routine
1916   - has old support for field major ordering
1917  */
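/*
 A sketch of the run-time knobs that drive this path (only option spellings that appear in this
 file are used; everything else is parsed in ProcessOptions() and not restated here):

   -dm_landau_jacobian_field_major_order   (build ctx->batch_is and ctx->plex_batch below)
   -dm_landau_reorder_mat_view             (view the per-grid matrix used for the RCM ordering)
   -dm_landau_mat_view                     (view the assembled block Jacobian)

 The RCM ordering of each grid is inverted into grid_batch_is_inv[] and later consumed by
 CreateStaticGPUData() when building the GPU assembly maps.
*/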
1918 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx) {
1919   PetscInt *idxs = NULL;
1920   Mat       subM[LANDAU_MAX_GRIDS];
1921 
1922   PetscFunctionBegin;
1923   if (!ctx->gpu_assembly) { /* nothing to do here without GPU assembly */
1924     PetscFunctionReturn(0);
1925   }
1926   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
1927   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs)); }
1928   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1929     const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid];
1930     Mat             gMat;
1931     DM              massDM;
1932     PetscDS         prob;
1933     Vec             tvec;
1934     // get "mass" matrix for reordering
1935     PetscCall(DMClone(ctx->plex[grid], &massDM));
1936     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
1937     PetscCall(DMCreateDS(massDM));
1938     PetscCall(DMGetDS(massDM, &prob));
1939     for (int ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) { PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL)); }
1940     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1941     PetscCall(DMCreateMatrix(massDM, &gMat));
1942     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
1943     PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
1944     PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
1945     PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec));
1946     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
1947     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
1948     PetscCall(DMDestroy(&massDM));
1949     PetscCall(VecDestroy(&tvec));
1950     subM[grid] = gMat;
1951     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1952       MatOrderingType rtype = MATORDERINGRCM;
1953       IS              isrow, isicol;
1954       PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol));
1955       PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid]));
1956       PetscCall(ISGetIndices(isrow, &values));
1957       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1958 #if !defined(LANDAU_SPECIES_MAJOR)
1959         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N;
1960         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1961 #else
1962         PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n;
1963         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1964 #endif
1965       }
1966       PetscCall(ISRestoreIndices(isrow, &values));
1967       PetscCall(ISDestroy(&isrow));
1968       PetscCall(ISDestroy(&isicol));
1969     }
1970   }
1971   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is)); }
1972   // get a block matrix
1973   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1974     Mat      B = subM[grid];
1975     PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024;
1976     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1977     PetscCall(MatGetSize(B, &nloc, NULL));
1978     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1979       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1980       const PetscInt    *cols;
1981       const PetscScalar *vals;
1982       for (int i = 0; i < nloc; i++) {
1983         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
1984         if (nzl > COL_BF_SIZE) {
1985           PetscCall(PetscFree(colbuf));
1986           PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, nzl, nzl));
1987           COL_BF_SIZE = nzl;
1988           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1989         }
1990         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
1991         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
1992         row = i + moffset;
1993         PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
1994         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
1995       }
1996     }
1997     PetscCall(PetscFree(colbuf));
1998   }
1999   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(MatDestroy(&subM[grid])); }
2000   PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY));
2001   PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY));
2002 
2003   // debug
2004   PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
2005   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2006     Mat mat_block_order;
2007     PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute
2008     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view"));
2009     PetscCall(MatDestroy(&mat_block_order));
2010     PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch));
2011     PetscCall(VecDuplicate(X, &ctx->work_vec));
2012   }
2013 
2014   PetscFunctionReturn(0);
2015 }
2016 
2017 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2018 /*@C
2019  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2020 
2021  Collective on comm
2022 
2023  Input Parameters:
2024  +   comm  - The MPI communicator
2025  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2026  -   prefix - prefix for options (not tested)
2027 
2028  Output Parameters:
2029  +   pack  - The DM object representing the mesh
2030  .   X - A vector (user destroys)
2031  -   J - Optional matrix (object destroys)
2032 
2033  Level: beginner
2034 
2035  .keywords: mesh
2036  .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
2037  @*/
2038 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack) {
2039   LandauCtx *ctx;
2040   Vec        Xsub[LANDAU_MAX_GRIDS];
2041   IS         grid_batch_is_inv[LANDAU_MAX_GRIDS];
2042 
2043   PetscFunctionBegin;
2044   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2045   PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
2046   PetscCall(PetscNew(&ctx));
2047   ctx->comm = comm; /* used for diagnostics and global errors */
2048   /* process options */
2049   PetscCall(ProcessOptions(ctx, prefix));
2050   if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE;
2051   /* Create Mesh */
2052   PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack));
2053   PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0));
2054   PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0));
2055   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR)
2056   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2057     /* create FEM */
2058     PetscCall(SetupDS(ctx->plex[grid], dim, grid, ctx));
2059     /* set initial state */
2060     PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid]));
2061     PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig"));
2062     /* initial static refinement, no solve */
2063     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
2064     /* forest refinement - forest goes in (if forest), plex comes out */
2065     if (ctx->use_p4est) {
2066       DM plex;
2067       PetscCall(adapt(grid, ctx, &Xsub[grid]));                                      // forest goes in, plex comes out
2068       PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view")); // need to differentiate - todo
2069       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
2070       // convert to plex, all done with this level
2071       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
2072       PetscCall(DMDestroy(&ctx->plex[grid]));
2073       ctx->plex[grid] = plex;
2074     }
2075 #if !defined(LANDAU_SPECIES_MAJOR)
2076     PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
2077 #else
2078     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2079       PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
2080     }
2081 #endif
2082     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
2083   }
2084 #if !defined(LANDAU_SPECIES_MAJOR)
2085   // stack the DMs for the remaining batch members (b_id > 0); b_id = 0 was added in the grid loop above
2086   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2087     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); }
2088   }
2089 #endif
2090   // create ctx->mat_offset
2091   ctx->mat_offset[0] = 0;
2092   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2093     PetscInt n;
2094     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2095     ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n;
2096   }
2097   // create DM & Jac
2098   PetscCall(DMSetApplicationContext(*pack, ctx));
2099   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2100   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2101   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2102   PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2103   PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2104   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2105   // construct initial conditions in X
2106   PetscCall(DMCreateGlobalVector(*pack, X));
2107   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2108     PetscInt n;
2109     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2110     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2111       PetscScalar const *values;
2112       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2113       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx));
2114       PetscCall(VecGetArrayRead(Xsub[grid], &values));
2115       for (int i = 0, idx = moffset; i < n; i++, idx++) { PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES)); }
2116       PetscCall(VecRestoreArrayRead(Xsub[grid], &values));
2117     }
2118   }
2119   // cleanup
2120   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(VecDestroy(&Xsub[grid])); }
2121   /* check for correct matrix type */
2122   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2123     PetscBool flg;
2124     if (ctx->deviceType == LANDAU_CUDA) {
2125       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, MATAIJCUSPARSE, ""));
2126       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2127     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2128       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, ""));
2129 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2130       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2131 #else
2132       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2133 #endif
2134     }
2135   }
2136   PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0));
2137   // create field major ordering
2138 
2139   ctx->work_vec   = NULL;
2140   ctx->plex_batch = NULL;
2141   ctx->batch_is   = NULL;
2142   for (int i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL;
2143   PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0));
2144   PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx));
2145   PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0));
2146 
2147   // create AMR GPU assembly maps and static GPU data
2148   PetscCall(CreateStaticGPUData(dim, grid_batch_is_inv, ctx));
2149 
2150   PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0));
2151 
2152   // create mass matrix
2153   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2154 
2155   if (J) *J = ctx->J;
2156 
2157   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2158     PetscContainer container;
2159     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2160     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2161     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2162     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container));
2163     PetscCall(PetscContainerDestroy(&container));
2164     // batch solvers also need the plex_batch map
2165     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2166     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2167     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container));
2168     PetscCall(PetscContainerDestroy(&container));
2169   }
2170   // for batch solvers
2171   {
2172     PetscContainer container;
2173     PetscInt      *pNf;
2174     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2175     PetscCall(PetscMalloc1(1, &pNf));
2176     *pNf = ctx->batch_sz;
2177     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2178     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2179     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container));
2180     PetscCall(PetscContainerDestroy(&container));
2181   }
2182 
2183   PetscFunctionReturn(0);
2184 }
2185 
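/*
 A minimal usage sketch (illustrative only, not called by the library; error handling abbreviated
 and the 2D/axisymmetric case assumed). It shows the TS hook-up that DMPlexLandauIFunction() and
 DMPlexLandauIJacobian() below expect, with Amat == Pmat == ctx->J:

   DM  pack;
   Vec X;
   Mat J;
   TS  ts;

   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack));
   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
   PetscCall(TSSetDM(ts, pack));
   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
   PetscCall(TSSetFromOptions(ts));
   PetscCall(TSSolve(ts, X));
   PetscCall(TSDestroy(&ts));
   PetscCall(VecDestroy(&X));
   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));

 The mass matrix and the static GPU data are created inside DMPlexLandauCreateVelocitySpace(),
 so nothing else is required before TSSolve().
*/
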
2186 /*@
2187  DMPlexLandauAddToFunction - Add to the distribution function with user callback
2188 
2189  Collective on dm
2190 
2191  Input Parameters:
2192  +   pack - the DMComposite
2193  .   func - callback function that fills the per-species vector
2194  -   user_ctx - user context passed through to func
2195 
2196  Input/Output Parameter:
2197  .   X - Vector to add the data to
2198 
2199  Level: advanced
2200 
2201  .keywords: mesh
2202  .seealso: `DMPlexLandauCreateVelocitySpace()`
2203  @*/
2204 PetscErrorCode DMPlexLandauAddToFunction(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx) {
2205   LandauCtx *ctx;
2206   PetscFunctionBegin;
2207   PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2208   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2209     PetscInt dim, n, Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
2210     Vec      vec;
2211     DM       dm;
2212     PetscFE  fe;
2213     PetscCall(DMGetDimension(pack, &dim));
2214     PetscCall(DMClone(ctx->plex[grid], &dm));
2215     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
2216     PetscCall(PetscObjectSetName((PetscObject)fe, "species"));
2217     PetscCall(DMSetField(dm, 0, NULL, (PetscObject)fe));
2218     PetscCall(PetscFEDestroy(&fe));
2219     PetscCall(DMSetApplicationContext(dm, ctx)); // does the user want this?
2220     PetscCall(DMCreateGlobalVector(dm, &vec));
2221     PetscCall(VecGetSize(vec, &n));
2222     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2223       const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2224       for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
2225         PetscCall(VecZeroEntries(vec));
2226         /* Add your data with 'dm' for species 'sp' to 'vec' */
2227         PetscCall(func(dm, vec, i0, grid, b_id, user_ctx));
2228         /* add to global */
2229         PetscScalar const *values;
2230         PetscCall(VecGetArrayRead(vec, &values));
2231         for (int i = 0, idx = moffset + i0; i < n; i++, idx += Nf) { // works for Q1 & Q2 only -- TODO
2232           PetscCall(VecSetValue(X, idx, values[i], ADD_VALUES));
2233         }
2234         PetscCall(VecRestoreArrayRead(vec, &values));
2235       }
2236     } // batch
2237     PetscCall(VecDestroy(&vec));
2238     PetscCall(DMDestroy(&dm));
2239   } // grid
2240   PetscFunctionReturn(0);
2241 }
2242 
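/*
 A sketch of a user callback for DMPlexLandauAddToFunction() (illustrative only; 'MySource' and
 'my_source_function' are placeholder names). The callback receives a single-field clone DM and a
 zeroed vector for species index 'i0' of grid 'grid' and batch member 'b_id', and fills the
 vector, e.g. with DMProjectFunction():

   static PetscErrorCode MySource(DM dm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user_ctx)
   {
     PetscErrorCode (*fun[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {my_source_function};
     void *ctxs[1] = {user_ctx};
     PetscFunctionBegin;
     PetscCall(DMProjectFunction(dm, 0.0, fun, ctxs, INSERT_ALL_VALUES, vec));
     PetscFunctionReturn(0);
   }

   PetscCall(DMPlexLandauAddToFunction(pack, X, MySource, NULL));
*/
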
2243 /*@
2244  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2245 
2246  Collective on dm
2247 
2248  Input/Output Parameters:
2249  .   dm - the dm to destroy
2250 
2251  Level: beginner
2252 
2253  .keywords: mesh
2254  .seealso: `DMPlexLandauCreateVelocitySpace()`
2255  @*/
2256 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm) {
2257   LandauCtx *ctx;
2258   PetscFunctionBegin;
2259   PetscCall(DMGetApplicationContext(*dm, &ctx));
2260   PetscCall(MatDestroy(&ctx->M));
2261   PetscCall(MatDestroy(&ctx->J));
2262   for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2263   PetscCall(ISDestroy(&ctx->batch_is));
2264   PetscCall(VecDestroy(&ctx->work_vec));
2265   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2266   if (ctx->deviceType == LANDAU_CUDA) {
2267 #if defined(PETSC_HAVE_CUDA)
2268     PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d));
2269 #else
2270     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s not built", "cuda");
2271 #endif
2272   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2273 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2274     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2275 #else
2276     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s not built", "kokkos");
2277 #endif
2278   } else {
2279     if (ctx->SData_d.x) { /* in a CPU run */
2280       PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
2281       LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
2282       PetscCall(PetscFree4(ww, xx, yy, invJ));
2283       if (zz) PetscCall(PetscFree(zz));
2284       if (coo_elem_offsets) {
2285         PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL
2286       }
2287     }
2288   }
2289 
2290   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2291     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE]));
2292     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz));
2293     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL]));
2294     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN]));
2295     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n", ctx->times[LANDAU_OPERATOR]));
2296     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n", ctx->times[LANDAU_MASS]));
2297     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n", ctx->times[LANDAU_F_DF]));
2298     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n", ctx->times[LANDAU_KERNEL]));
2299     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n", ctx->times[KSP_FACTOR]));
2300     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n", ctx->times[KSP_SOLVE]));
2301   }
2302   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(DMDestroy(&ctx->plex[grid])); }
2303   PetscCall(PetscFree(ctx));
2304   PetscCall(DMDestroy(dm));
2305   PetscFunctionReturn(0);
2306 }
2307 
2308 /* < v, u > */
2309 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2310   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2311   f0[0]       = u[ii];
2312 }
2313 
2314 /* < v, x_j u > */
2315 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2316   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2317   f0[0] = x[jj] * u[ii]; /* x momentum */
2318 }
2319 
2320 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2321   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2322   double   tmp1 = 0.;
2323   for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2324   f0[0] = tmp1 * u[ii];
2325 }
2326 
2327 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) {
2328   const PetscReal *c2_0_arr = ((PetscReal *)actx);
2329   const PetscReal  c02      = c2_0_arr[0];
2330 
2331   PetscFunctionBegin;
2332   for (int s = 0; s < Nf; s++) {
2333     PetscReal tmp1 = 0.;
2334     for (int i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2335 #if defined(PETSC_USE_DEBUG)
2336     u[s] = PetscSqrtReal(1. + tmp1 / c02); // gamma = sqrt(1 + |u|^2/c_0^2)
2337 #else
2338     {
2339       PetscReal xx = tmp1 / c02;
2340       u[s] = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned form of sqrt(1 + xx) - 1, see the note below
2341     }
2342 #endif
2343   }
2344   PetscFunctionReturn(0);
2345 }
2346 
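/*
 Conditioning note: the non-debug branch of gamma_n_f() above uses the identity

   sqrt(1 + xx) - 1 = xx / (sqrt(1 + xx) + 1),   xx = |u|^2 / c_0^2,

 which avoids the cancellation in evaluating sqrt(1 + xx) - 1 directly when xx is small; the
 debug branch evaluates sqrt(1 + xx) itself (i.e. gamma rather than gamma - 1).
*/
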
2347 /* < v, 2 pi r u > */
2348 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2349   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2350   f0[0]       = 2. * PETSC_PI * x[0] * u[ii];
2351 }
2352 
2353 /* < v, 2 pi r z u > */
2354 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2355   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2356   f0[0]       = 2. * PETSC_PI * x[0] * x[1] * u[ii];
2357 }
2358 
2359 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) {
2360   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2361   f0[0]       = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii];
2362 }
2363 
2364 /*@
2365  DMPlexLandauPrintNorms - collects moments and prints them
2366 
2367  Collective on dm
2368 
2369  Input Parameters:
2370  +   X  - the state
2371  -   stepi - current step to print
2372 
2373  Level: beginner
2374 
2375  .keywords: mesh
2376  .seealso: `DMPlexLandauCreateVelocitySpace()`
2377  @*/
2378 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi) {
2379   LandauCtx  *ctx;
2380   PetscDS     prob;
2381   DM          pack;
2382   PetscInt    cStart, cEnd, dim, ii, i0, nDMs;
2383   PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES];
2384   PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2385   Vec        *globXArray;
2386 
2387   PetscFunctionBegin;
2388   PetscCall(VecGetDM(X, &pack));
2389   PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2390   PetscCall(DMGetDimension(pack, &dim));
2391   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim);
2392   PetscCall(DMGetApplicationContext(pack, &ctx));
2393   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2394   /* print momentum and energy */
2395   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
2396   PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz);
2397   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
2398   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2399   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2400     Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2401     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2402     for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
2403       PetscScalar user[2] = {(PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2404       PetscCall(PetscDSSetConstants(prob, 2, user));
2405       if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */
2406         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2407         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2408         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2409         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2410         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2411         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2412         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2413         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2414         energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2415         zmomentumtot += zmomentum[ii];
2416         energytot += energy[ii];
2417         densitytot += density[ii];
2418         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2419       } else { /* 2/3Xloc + 3V */
2420         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2421         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2422         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2423         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2424         user[1] = 0;
2425         PetscCall(PetscDSSetConstants(prob, 2, user));
2426         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2427         xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2428         user[1]       = 1;
2429         PetscCall(PetscDSSetConstants(prob, 2, user));
2430         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2431         ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2432         user[1]       = 2;
2433         PetscCall(PetscDSSetConstants(prob, 2, user));
2434         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2435         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2436         if (ctx->use_relativistic_corrections) {
2437           /* gamma * M * f */
2438           if (ii == 0 && grid == 0) { // do all at once
2439             Vec Mf, globGamma, *globMfArray, *globGammaArray;
2440             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f};
2441             PetscReal *c2_0[1], data[1];
2442 
2443             PetscCall(VecDuplicate(X, &globGamma));
2444             PetscCall(VecDuplicate(X, &Mf));
2445             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray));
2446             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray));
2447             /* M * f */
2448             PetscCall(MatMult(ctx->M, X, Mf));
2449             /* gamma */
2450             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2451             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside a grid loop to print nicely; needs fixing for batching
2452               Vec v1  = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2453               data[0] = PetscSqr(C_0(ctx->v_0));
2454               c2_0[0] = &data[0];
2455               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1));
2456             }
2457             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2458             /* gamma * Mf */
2459             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2460             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2461             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside a grid loop to print nicely
2462               PetscInt Nf    = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs;
2463               Vec      Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2;
2464               // get each component
2465               PetscCall(VecGetSize(Mfsub, &N));
2466               PetscCall(VecCreate(ctx->comm, &v1));
2467               PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf));
2468               PetscCall(VecCreate(ctx->comm, &v2));
2469               PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf));
2470               PetscCall(VecSetFromOptions(v1)); // ???
2471               PetscCall(VecSetFromOptions(v2));
2472               // get each component
2473               PetscCall(VecGetBlockSize(Gsub, &bs));
2474               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf);
2475               PetscCall(VecGetBlockSize(Mfsub, &bs));
2476               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf);
2477               for (int i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) {
2478                 PetscScalar val;
2479                 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO
2480                 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES));
2481                 PetscCall(VecDot(v1, v2, &val));
2482                 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix];
2483               }
2484               PetscCall(VecDestroy(&v1));
2485               PetscCall(VecDestroy(&v2));
2486             } /* grids */
2487             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2488             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2489             PetscCall(PetscFree(globGammaArray));
2490             PetscCall(PetscFree(globMfArray));
2491             PetscCall(VecDestroy(&globGamma));
2492             PetscCall(VecDestroy(&Mf));
2493           }
2494         } else {
2495           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2496           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2497           energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2498         }
2499         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2500         xmomentumtot += xmomentum[ii];
2501         ymomentumtot += ymomentum[ii];
2502         zmomentumtot += zmomentum[ii];
2503         energytot += energy[ii];
2504         densitytot += density[ii];
2505       }
2506       if (ctx->num_species > 1) PetscCall(PetscPrintf(ctx->comm, "\n"));
2507     }
2508   }
2509   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2510   PetscCall(PetscFree(globXArray));
2511   /* totals */
2512   PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd));
2513   if (ctx->num_species > 1) {
2514     if (dim == 2) {
2515       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2516                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2517     } else {
2518       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2519                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2520     }
2521   } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells", cEnd - cStart));
2522   PetscCall(PetscPrintf(ctx->comm, "\n"));
2523   PetscFunctionReturn(0);
2524 }
2525 
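/*
 A sketch of calling DMPlexLandauPrintNorms() from a TS monitor (illustrative only; 'Monitor' is
 a placeholder name, registered with the standard TSMonitorSet() interface):

   static PetscErrorCode Monitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
   {
     PetscFunctionBegin;
     PetscCall(DMPlexLandauPrintNorms(X, stepi));
     PetscFunctionReturn(0);
   }

   PetscCall(TSMonitorSet(ts, Monitor, NULL, NULL));
*/
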
2526 /*@
2527  DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2528   - puts mass matrix into ctx->M
2529 
2530  Collective on pack
2531 
2532  Input/Output Parameter:
2533 . pack     - the DM object; the mass matrix is stored in the M field of its Landau context
2534 
2535  Output Parameter:
2536 . Amat - The mass matrix (optional)
2537 
2538  Level: beginner
2539 
2540  .keywords: mesh
2541  .seealso: `DMPlexLandauCreateVelocitySpace()`
2542  @*/
2543 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat) {
2544   DM         mass_pack, massDM[LANDAU_MAX_GRIDS];
2545   PetscDS    prob;
2546   PetscInt   ii, dim, N1 = 1, N2;
2547   LandauCtx *ctx;
2548   Mat        packM, subM[LANDAU_MAX_GRIDS];
2549 
2550   PetscFunctionBegin;
2551   PetscValidHeaderSpecific(pack, DM_CLASSID, 1);
2552   if (Amat) PetscValidPointer(Amat, 2);
2553   PetscCall(DMGetApplicationContext(pack, &ctx));
2554   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2555   PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0));
2556   PetscCall(DMGetDimension(pack, &dim));
2557   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack));
2558   /* create pack mass matrix */
2559   for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) {
2560     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2561     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2562     PetscCall(DMCreateDS(massDM[grid]));
2563     PetscCall(DMGetDS(massDM[grid], &prob));
2564     for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) {
2565       if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2566       else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2567     }
2568 #if !defined(LANDAU_SPECIES_MAJOR)
2569     PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2570 #else
2571     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2572       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2573     }
2574 #endif
2575     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2576   }
2577 #if !defined(LANDAU_SPECIES_MAJOR)
2578   // stack the batched DMs
2579   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2580     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); }
2581   }
2582 #endif
2583   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2584   PetscCall(DMCreateMatrix(mass_pack, &packM));
2585   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2586   PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2587   PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2588   PetscCall(DMDestroy(&mass_pack));
2589   /* make mass matrix for each block */
2590   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2591     Vec locX;
2592     DM  plex = massDM[grid];
2593     PetscCall(DMGetLocalVector(plex, &locX));
2594     /* Mass matrix is independent of the input, so no need to fill locX */
2595     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2596     PetscCall(DMRestoreLocalVector(plex, &locX));
2597     PetscCall(DMDestroy(&massDM[grid]));
2598   }
2599   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2600   PetscCall(MatGetSize(packM, &N2, NULL));
2601   PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2);
2602   /* assemble block diagonals */
2603   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2604     Mat      B = subM[grid];
2605     PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row;
2606     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2607     PetscCall(MatGetSize(B, &nloc, NULL));
2608     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2609       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2610       const PetscInt    *cols;
2611       const PetscScalar *vals;
2612       for (int i = 0; i < nloc; i++) {
2613         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
2614         if (nzl > COL_BF_SIZE) {
2615           PetscCall(PetscFree(colbuf));
2616           PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, nzl, nzl));
2617           COL_BF_SIZE = nzl;
2618           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2619         }
2620         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
2621         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
2622         row = i + moffset;
2623         PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
2624         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
2625       }
2626     }
2627     PetscCall(PetscFree(colbuf));
2628   }
2629   // cleanup
2630   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { PetscCall(MatDestroy(&subM[grid])); }
2631   PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY));
2632   PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY));
2633   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2634   PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view"));
2635   ctx->M = packM;
2636   if (Amat) *Amat = packM;
2637   PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0));
2638   PetscFunctionReturn(0);
2639 }
2640 
2641 /*@
2642  DMPlexLandauIFunction - TS residual calculation; note that, perhaps confusingly, this is where the collision Jacobian (without the mass term) is computed
2643 
2644  Collective on ts
2645 
2646  Input Parameters:
2647 +   ts  - The time stepping context
2648 .   time_dummy - current time (not used)
2649 .   X - Current state
2650 .   X_t - Time derivative of current state
2651 -   actx - Landau context
2652 
2653  Output Parameter:
2654 .   F  - The residual
2655 
2656  Level: beginner
2657 
2658  .keywords: mesh
2659  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2660  @*/
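/*
 Summary of what is assembled below: the residual is

   F(X, X_t) = C(X) X + M X_t,

 where C(X) is the collision matrix cached in ctx->J (formed here with zero shift, i.e. without
 the mass term) and M is the mass matrix ctx->M. C(X) is rebuilt only when the PetscObjectState
 of ctx->J differs from the state recorded at its last formation (ctx->norm_state).
*/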
2661 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx) {
2662   LandauCtx *ctx = (LandauCtx *)actx;
2663   PetscInt   dim;
2664   DM         pack;
2665 #if defined(PETSC_HAVE_THREADSAFETY)
2666   double starttime, endtime;
2667 #endif
2668   PetscObjectState state;
2669 
2670   PetscFunctionBegin;
2671   PetscCall(TSGetDM(ts, &pack));
2672   PetscCall(DMGetApplicationContext(pack, &ctx));
2673   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2674   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2675   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2676   PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0));
2677 #if defined(PETSC_HAVE_THREADSAFETY)
2678   starttime = MPI_Wtime();
2679 #endif
2680   PetscCall(DMGetDimension(pack, &dim));
2681   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2682   if (state != ctx->norm_state) {
2683     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n", (double)time_dummy, ctx->norm_state, state));
2684     PetscCall(MatZeroEntries(ctx->J));
2685     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx));
2686     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2687     PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2688     ctx->norm_state = state;
2689   } else {
2690     PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state));
2691   }
2692   /* mat vec for op */
2693   PetscCall(MatMult(ctx->J, X, F)); /* C*f */
2694   /* add time term */
2695   if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F));
2696 #if defined(PETSC_HAVE_THREADSAFETY)
2697   if (ctx->stage) {
2698     endtime = MPI_Wtime();
2699     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2700     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2701     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2702   }
2703 #endif
2704   PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0));
2705   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2706   if (ctx->stage) {
2707     PetscCall(PetscLogStagePop());
2708 #if defined(PETSC_HAVE_THREADSAFETY)
2709     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2710 #endif
2711   }
2712   PetscFunctionReturn(0);
2713 }
2714 
2715 /*@
2716  DMPlexLandauIJacobian - TS Jacobian construction; note that, perhaps confusingly, this step only adds the mass (shift) term
2717 
2718  Collective on ts
2719 
2720  Input Parameters:
2721 +   ts  - The time stepping context
2722 .   time_dummy - current time (not used)
2723 .   X - Current state
2724 .   U_tdummy - Time derivative of current state (not used)
2725 .   shift - shift for du/dt term
2726 -   actx - Landau context
2727 
2728  Output Parameters:
2729 +   Amat  - Jacobian
2730 -   Pmat  - same as Amat
2731 
2732  Level: beginner
2733 
2734  .keywords: mesh
2735  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2736  @*/
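/*
 Summary of what is assembled below: with the collision matrix C(X) already in ctx->J from
 DMPlexLandauIFunction(), this routine produces

   Pmat = C(X) + shift * M,

 either by re-running LandauFormJacobian_Internal() with the shift, or, when
 ctx->use_matrix_mass is set (a run-time option parsed in ProcessOptions()), by a MatAXPY()
 with the cached mass matrix ctx->M.
*/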
2737 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx) {
2738   LandauCtx *ctx = NULL;
2739   PetscInt   dim;
2740   DM         pack;
2741 #if defined(PETSC_HAVE_THREADSAFETY)
2742   double starttime, endtime;
2743 #endif
2744   PetscObjectState state;
2745 
2746   PetscFunctionBegin;
2747   PetscCall(TSGetDM(ts, &pack));
2748   PetscCall(DMGetApplicationContext(pack, &ctx));
2749   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2750   PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2751   PetscCall(DMGetDimension(pack, &dim));
2752   /* get collision Jacobian into A */
2753   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2754   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2755   PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0));
2756 #if defined(PETSC_HAVE_THREADSAFETY)
2757   starttime = MPI_Wtime();
2758 #endif
2759   PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n", (double)time_dummy, (double)shift));
2760   PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift");
2761   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2762   PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "", ctx->norm_state, state);
2763   if (!ctx->use_matrix_mass) {
2764     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx));
2765   } else { /* add mass */
2766     PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN));
2767   }
2768 #if defined(PETSC_HAVE_THREADSAFETY)
2769   if (ctx->stage) {
2770     endtime = MPI_Wtime();
2771     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2772     ctx->times[LANDAU_MASS] += (endtime - starttime);
2773   }
2774 #endif
2775   PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0));
2776   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2777   if (ctx->stage) {
2778     PetscCall(PetscLogStagePop());
2779 #if defined(PETSC_HAVE_THREADSAFETY)
2780     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2781 #endif
2782   }
2783   PetscFunctionReturn(0);
2784 }
2785