xref: /petsc/src/ts/utils/dmplexlandau/plexland.c (revision 4e8208cbcbc709572b8abe32f33c78b69c819375)
1 #include <../src/mat/impls/aij/seq/aij.h>
2 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
3 #include <petsclandau.h>              /*I "petsclandau.h"   I*/
4 #include <petscts.h>
5 #include <petscdmforest.h>
6 #include <petscdmcomposite.h>
7 
8 /* Landau collision operator */
9 
10 /* relativistic terms */
11 #if defined(PETSC_USE_REAL_SINGLE)
12   #define SPEED_OF_LIGHT 2.99792458e8F
13   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
14 #else
15   #define SPEED_OF_LIGHT 2.99792458e8
16   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
17 #endif
18 
19 #include "land_tensors.h"
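/* land_tensors.h provides the LandauTensor2D/LandauTensor3D(Relativistic) kernels used in the CPU Jacobian below */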
20 
21 #if defined(PETSC_HAVE_OPENMP)
22   #include <omp.h>
23 #endif
24 
25 static PetscErrorCode LandauGPUMapsDestroy(PetscCtxRt ptr)
26 {
27   P4estVertexMaps *maps = *(P4estVertexMaps **)ptr;
28 
29   PetscFunctionBegin;
30   // free device data
31   if (maps[0].deviceType != LANDAU_CPU) {
32 #if defined(PETSC_HAVE_KOKKOS)
33     if (maps[0].deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // a Kokkos device type implies the maps were built by Kokkos
34 #endif
35   }
36   // free host data
37   for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
38     PetscCall(PetscFree(maps[grid].c_maps));
39     PetscCall(PetscFree(maps[grid].gIdx));
40   }
41   PetscCall(PetscFree(maps));
42   PetscFunctionReturn(PETSC_SUCCESS);
43 }
44 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
45 {
46   PetscReal v2 = 0;
47 
48   PetscFunctionBegin;
49   /* compute v^2 */
50   for (PetscInt i = 0; i < dim; ++i) v2 += x[i] * x[i];
51   /* kinetic energy: v^2 / 2 */
52   u[0] = v2 / 2;
53   PetscFunctionReturn(PETSC_SUCCESS);
54 }
55 
56 /* needs double */
57 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
58 {
59   PetscReal *c2_0_arr = ((PetscReal *)actx);
60   double     u2 = 0, c02 = (double)*c2_0_arr, xx;
61 
62   PetscFunctionBegin;
63   /* compute u^2 */
64   for (PetscInt i = 0; i < dim; ++i) u2 += x[i] * x[i];
65   /* gamma - 1 = g_eps: written this way for conditioning; only derivatives of this field are used */
66   xx = u2 / c02;
67 #if defined(PETSC_USE_DEBUG)
68   u[0] = PetscSqrtReal(1. + xx);
69 #else
70   u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned form of gamma - 1; the extra -1 is a constant shift and only derivatives are used
71 #endif
72   PetscFunctionReturn(PETSC_SUCCESS);
73 }
74 
75 /*
76  LandauFormJacobian_Internal - Evaluates Jacobian matrix.
77 
78  Input Parameters:
79  .  a_X - input vector
80  .  a_ctx - optional user-defined context (LandauCtx)
81  .  dim - dimension; shift selects the Jacobian (shift == 0) or the shifted mass matrix
82 
83  Output Parameter:
84  .  JacP - Jacobian matrix, filled here (not created)
85  */
86 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
87 {
88   LandauCtx         *ctx = (LandauCtx *)a_ctx;
89   PetscInt           numCells[LANDAU_MAX_GRIDS], Nq, Nb;
90   PetscQuadrature    quad;
91   PetscReal          Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
92   PetscScalar       *cellClosure = NULL;
93   const PetscScalar *xdata       = NULL;
94   PetscDS            prob;
95   PetscContainer     container;
96   P4estVertexMaps   *maps;
97   Mat                subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];
98 
99   PetscFunctionBegin;
100   PetscValidHeaderSpecific(a_X, VEC_CLASSID, 1);
101   PetscValidHeaderSpecific(JacP, MAT_CLASSID, 2);
102   PetscAssertPointer(ctx, 5);
103   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
104   PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
105   PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0));
106   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
107   PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
108   if (container) {
109     PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly");
110     PetscCall(PetscContainerGetPointer(container, &maps));
111     PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container");
112     for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
113   } else {
114     PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly");
115     for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
116       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)]));
117     }
118     maps = NULL;
119   }
120   // get dynamic data for CPU assembly (Eq is the odd one, needed for quench and the Spitzer test) and raw vector data for GPU Jacobian assembly; also get host numCells[] and Nq (yuck)
121   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
122   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
123   PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
124   PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
125   PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
126   // get metadata for collecting dynamic data
127   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
128     PetscInt cStart, cEnd;
129     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
130     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
131     numCells[grid] = cEnd - cStart; // grids can have different topology
132   }
133   PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0));
134   if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
135     DM pack;
136     PetscCall(VecGetDM(a_X, &pack));
137     PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
138     PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0));
139     for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
140       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
141       if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                  /* add the 2pi term that is not in Landau */
142     }
143     if (!ctx->gpu_assembly) {
144       Vec         *locXArray, *globXArray;
145       PetscScalar *cellClosure_it;
146       PetscInt     cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
147       PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
148       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
149         PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
150         PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
151         PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
152       }
153       /* count cellClosure size */
154       PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
155       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
156       PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure));
157       cellClosure_it = cellClosure;
158       PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray));
159       PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
160       PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
161       PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
162       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
163         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
164           Vec      locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
165           PetscInt cStart, cEnd, ei;
166           PetscCall(VecDuplicate(locX, &locX2));
167           PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
168           PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2));
169           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
170           for (ei = cStart; ei < cEnd; ++ei) {
171             PetscScalar *coef = NULL;
172             PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
173             PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
174             PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
175             cellClosure_it += Nb * Nf[grid];
176           }
177           PetscCall(VecDestroy(&locX2));
178         }
179       }
180       PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, cellClosure_it - cellClosure, cellClosure_sz * ctx->batch_sz);
181       PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
182       PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
183       PetscCall(PetscFree(locXArray));
184       PetscCall(PetscFree(globXArray));
185       xdata = NULL;
186     } else {
187       PetscMemType mtype;
188       if (ctx->jacobian_field_major_order) { // get data in batch ordering
189         PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
190         PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
191         PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype));
192       } else {
193         PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype));
194       }
195       PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij");
196       cellClosure = NULL;
197     }
198     PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0));
199   } else xdata = cellClosure = NULL;
200 
201   /* do it */
202   if (ctx->deviceType == LANDAU_KOKKOS) {
203 #if defined(PETSC_HAVE_KOKKOS)
204     PetscCall(LandauKokkosJacobian(ctx->plex, Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
205 #else
206     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
207 #endif
208   } else {               /* CPU version */
209     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
210     PetscInt         ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
211     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
212     PetscReal       *nu_alpha = (PetscReal *)ctx->SData_d.alpha, *nu_beta = (PetscReal *)ctx->SData_d.beta, *invMass = (PetscReal *)ctx->SData_d.invMass;
213     PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
214     PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
215     PetscScalar *coo_vals = NULL;
216     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
217       PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
218       PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
219       PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
220     }
221     /* count IPf size, etc */
222     PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
223     const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
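    /* Offsets built below: elem_offset[g] = cells before grid g, ip_offset[g] = quadrature points (cells*Nq)
       before grid g, ipf_offset[g] = per-species point data (Nq*nfloc*cells) before grid g;
       IPf_sz_glb is the total per batch. */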
224     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
225     for (PetscInt grid = 0; grid < num_grids; grid++) {
226       PetscInt nfloc        = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
227       elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
228       ip_offset[grid + 1]   = ip_offset[grid] + numCells[grid] * Nq;
229       ipf_offset[grid + 1]  = ipf_offset[grid] + Nq * nfloc * numCells[grid];
230     }
231     IPf_sz_glb = ipf_offset[num_grids];
232     IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
233     // prep COO
234     PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time?
235     if (shift == 0.0) {                                        /* compute dynamic data f and df and init data for Jacobian */
236 #if defined(PETSC_HAVE_THREADSAFETY)
237       double starttime, endtime;
238       starttime = MPI_Wtime();
239 #endif
240       PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0));
241       PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, (dim == 3 ? IPf_sz_tot : 0), &dudz));
242       // F df/dx
243       for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) {                        // for each element
244         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
245         // find my grid:
246         PetscInt grid = 0;
247         while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid
248         {
249           const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
250           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
251           PetscScalar   *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQND];
252           PetscReal     *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data on batch 0
253           PetscInt       b, f, q;
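          /* Gather element coefficients: copy from cellClosure (pure CPU path), or scatter from the raw
             vector data via the GPU-assembly maps; a negative gIdx entry encodes a constrained point
             (-idx - 1) whose value is a scaled sum over its c_maps face entries. */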
254           if (cellClosure) {
255             coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
256           } else {
257             coef = coef_buff;
258             for (f = 0; f < loc_Nf; ++f) {
259               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
260               for (b = 0; b < Nb; ++b) {
261                 PetscInt idx = Idxs[b];
262                 if (idx >= 0) {
263                   coef[f * Nb + b] = xdata[idx + moffset];
264                 } else {
265                   idx              = -idx - 1;
266                   coef[f * Nb + b] = 0;
267                   for (q = 0; q < maps[grid].num_face; q++) {
268                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
269                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
270                     coef[f * Nb + b] += scale * xdata[id + moffset];
271                   }
272                 }
273               }
274             }
275           }
276           /* get f and df */
277           for (PetscInt qi = 0; qi < Nq; qi++) {
278             const PetscReal *invJ = &invJe[qi * dim * dim];
279             const PetscReal *Bq   = &BB[qi * Nb];
280             const PetscReal *Dq   = &DD[qi * Nb * dim];
281             PetscReal        u_x[LANDAU_DIM];
282             /* get f & df */
283             for (f = 0; f < loc_Nf; ++f) {
284               const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
285               PetscInt       b, e;
286               PetscReal      refSpaceDer[LANDAU_DIM];
287               ff[idx] = 0.0;
288               for (PetscInt d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
289               for (b = 0; b < Nb; ++b) {
290                 const PetscInt cidx = b;
291                 ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
292                 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
293               }
294               for (PetscInt d = 0; d < LANDAU_DIM; ++d) {
295                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
296               }
297               dudx[idx] = u_x[0];
298               dudy[idx] = u_x[1];
299 #if LANDAU_DIM == 3
300               dudz[idx] = u_x[2];
301 #endif
302             }
303           } // q
304         } // grid
305       } // grid*batch
306       PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0));
307 #if defined(PETSC_HAVE_THREADSAFETY)
308       endtime = MPI_Wtime();
309       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
310 #endif
311     } // Jacobian setup
312     // assemble Jacobian (or mass)
313     for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
314       const PetscInt b_Nelem      = elem_offset[num_grids];
315       const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
316       PetscInt       grid = 0;
317 #if defined(PETSC_HAVE_THREADSAFETY)
318       double starttime, endtime;
319       starttime = MPI_Wtime();
320 #endif
321       while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
322       {
323         const PetscInt   loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
324         const PetscInt   moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
325         PetscScalar     *elemMat;
326         const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
327         PetscCall(PetscMalloc1(elemMatSize, &elemMat));
328         PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat)));
329         if (shift == 0.0) { // Jacobian
330           PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0));
331         } else { // mass
332           PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0));
333         }
334         for (PetscInt qj = 0; qj < Nq; ++qj) {
335           const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
336           PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
337           PetscInt       d, d2, dp, d3, IPf_idx;
338           if (shift == 0.0) { // Jacobian
339             const PetscReal *const invJj = &invJe[qj * dim * dim];
340             PetscReal              gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
341             const PetscReal        vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
342             // create g2 & g3
343             for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
344               gg2_temp[d] = 0;
345               for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
346             }
347             /* inner beta reduction */
348             IPf_idx = 0;
349             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
350               PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
351               for (PetscInt ei_r = 0; ei_r < numCells[grid_r]; ++ei_r) {
352                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++) {
353                   const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
354                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
355 #if LANDAU_DIM == 2
356                   PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
357                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
358 #else
359                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
360                   if (ctx->use_relativistic_corrections) {
361                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
362                   } else {
363                     LandauTensor3D(vj, x, y, z, U, mask);
364                   }
365 #endif
366                   for (PetscInt f = 0; f < Nfloc_r; ++f) {
367                     const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi;
368 
369                     temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
370                     temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
371 #if LANDAU_DIM == 3
372                     temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
373 #endif
374                     temp2 += ff[idx] * nu_beta[f + f_off] * (*lambdas)[grid][grid_r];
375                   }
376                   temp1[0] *= wi;
377                   temp1[1] *= wi;
378 #if LANDAU_DIM == 3
379                   temp1[2] *= wi;
380 #endif
381                   temp2 *= wi;
382 #if LANDAU_DIM == 2
383                   for (d2 = 0; d2 < 2; d2++) {
384                     for (d3 = 0; d3 < 2; ++d3) {
385                       /* K = U * grad(f): g2=e: i,A */
386                       gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
387                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
388                       gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
389                     }
390                   }
391 #else
392                   for (d2 = 0; d2 < 3; ++d2) {
393                     for (d3 = 0; d3 < 3; ++d3) {
394                       /* K = U * grad(f): g2 = e: i,A */
395                       gg2_temp[d2] += U[d2][d3] * temp1[d3];
396                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
397                       gg3_temp[d2][d3] += U[d2][d3] * temp2;
398                     }
399                   }
400 #endif
401                 } // qi
402               } // ei_r
403               IPf_idx += nip_loc_r * Nfloc_r;
404             } /* grid_r - IPs */
405             PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb);
406             // add alpha and put in gg2/3
407             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
408               for (d2 = 0; d2 < LANDAU_DIM; d2++) {
409                 gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
410                 for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off];
411               }
412             }
413             /* add electric field term once per IP */
414             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off];
415             /* Jacobian transform - g2, g3 */
416             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
417               for (d = 0; d < dim; ++d) {
418                 g2[fieldA][d] = 0.0;
419                 for (d2 = 0; d2 < dim; ++d2) {
420                   g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
421                   g3[fieldA][d][d2] = 0.0;
422                   for (d3 = 0; d3 < dim; ++d3) {
423                     for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp];
424                   }
425                   g3[fieldA][d][d2] *= wj;
426                 }
427                 g2[fieldA][d] *= wj;
428               }
429             }
430           } else { // mass
431             PetscReal wj = ww[jpidx_glb];
432             /* Jacobian transform - g0 */
433             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
434               if (dim == 2) {
435                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
436               } else {
437                 g0[fieldA] = wj * shift; // move this to below and remove g0
438               }
439             }
440           }
441           /* FE matrix construction */
442           {
443             PetscInt         fieldA, d, f, d2, g;
444             const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
445             /* assemble - on the diagonal (I,I) */
446             for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
447               for (f = 0; f < Nb; f++) {
448                 const PetscInt i = fieldA * Nb + f; /* Element matrix row */
449                 for (g = 0; g < Nb; ++g) {
450                   const PetscInt j    = fieldA * Nb + g; /* Element matrix column */
451                   const PetscInt fOff = i * totDim + j;
452                   if (shift == 0.0) {
453                     for (d = 0; d < dim; ++d) {
454                       elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
455                       for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2];
456                     }
457                   } else { // mass
458                     elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
459                   }
460                 }
461               }
462             }
463           }
464         } /* qj loop */
465         if (shift == 0.0) { // Jacobian
466           PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0));
467         } else {
468           PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0));
469         }
470 #if defined(PETSC_HAVE_THREADSAFETY)
471         endtime = MPI_Wtime();
472         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
473 #endif
474         /* assemble matrix */
475         if (!container) {
476           PetscInt cStart;
477           PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0));
478           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
479           PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES));
480           PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0));
481         } else { // GPU like assembly for debugging
482           PetscInt    fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
483           PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
484           LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets;
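          /* COO path (mirrors CreateStaticGPUData): coo_elem_offsets locates each element's block of values
             within a batch, coo_elem_fullNb is the expanded (face-constrained) point count per field, and
             coo_elem_point_offsets gives each point's offset within the element block. */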
485           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
486           for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
487             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
488             for (f = 0; f < Nb; f++) {
489               PetscInt idx = Idxs[f];
490               if (idx >= 0) {
491                 nr           = 1;
492                 rows0[0]     = idx;
493                 row_scale[0] = 1.;
494               } else {
495                 idx = -idx - 1;
496                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
497                   if (maps[grid].c_maps[idx][q].gid < 0) break;
498                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
499                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
500                 }
501               }
502               for (g = 0; g < Nb; ++g) {
503                 idx = Idxs[g];
504                 if (idx >= 0) {
505                   nc           = 1;
506                   cols0[0]     = idx;
507                   col_scale[0] = 1.;
508                 } else {
509                   idx = -idx - 1;
510                   nc  = maps[grid].num_face;
511                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
512                     if (maps[grid].c_maps[idx][q].gid < 0) break;
513                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
514                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
515                   }
516                 }
517                 const PetscInt    i   = fieldA * Nb + f; /* Element matrix row */
518                 const PetscInt    j   = fieldA * Nb + g; /* Element matrix column */
519                 const PetscScalar Aij = elemMat[i * totDim + j];
520                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
521                   const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
522                   const PetscInt idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
523                   for (PetscInt q = 0, idx2 = idx0; q < nr; q++) {
524                     for (PetscInt d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij;
525                   }
526                 } else {
527                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
528                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
529                   for (q = 0; q < nr; q++) {
530                     for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij;
531                   }
532                   PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES));
533                 }
534               }
535             }
536           }
537         }
538         if (loc_elem == -1) {
539           PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n"));
540           for (PetscInt d = 0; d < totDim; ++d) {
541             for (PetscInt f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f])));
542             PetscCall(PetscPrintf(ctx->comm, "\n"));
543           }
544           exit(12);
545         }
546         PetscCall(PetscFree(elemMat));
547       } /* grid */
548     } /* outer element & batch loop */
549     if (shift == 0.0) { // Jacobian: free the f and df work arrays
550       PetscCall(PetscFree4(ff, dudx, dudy, dudz));
551     }
552     if (!container) {                                         // 'CPU' assembly: move the per-grid sub matrices into the global JacP
553       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
554         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
555           const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
556           PetscInt           nloc, nzl, colbuf[1024], row;
557           const PetscInt    *cols;
558           const PetscScalar *vals;
559           Mat                B = subJ[LAND_PACK_IDX(b_id, grid)];
560           PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
561           PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
562           PetscCall(MatGetSize(B, &nloc, NULL));
563           for (PetscInt i = 0; i < nloc; i++) {
564             PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
565             PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
566             for (PetscInt j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
567             row = moffset + i;
568             PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
569             PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
570           }
571           PetscCall(MatDestroy(&B));
572         }
573       }
574     }
575     if (coo_vals) {
576       PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES));
577       PetscCall(PetscFree(coo_vals));
578     }
579   } /* CPU version */
580   PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
581   PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
582   /* clean up */
583   if (cellClosure) PetscCall(PetscFree(cellClosure));
584   if (xdata) PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata));
585   PetscFunctionReturn(PETSC_SUCCESS);
586 }
587 
588 /* create DMComposite of meshes for each species group */
589 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
590 {
591   PetscFunctionBegin;
592   /* p4est, quads */
593   /* Create plex mesh of Landau domain */
594   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
595     PetscReal par_radius = ctx->radius_par[grid], perp_radius = ctx->radius_perp[grid];
596     if (!ctx->sphere && !ctx->simplex) { // 2 or 3D (only 3D option)
597       PetscReal      lo[] = {-perp_radius, -par_radius, -par_radius}, hi[] = {perp_radius, par_radius, par_radius};
598       DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim == 2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
599       if (dim == 2) lo[0] = 0;
600       else {
601         lo[1] = -perp_radius;
602         hi[1] = perp_radius; // 3D y is a perp
603       }
604       PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, 0, PETSC_TRUE, &ctx->plex[grid])); // TODO: make composite and create dm[grid] here
605       PetscCall(DMLocalizeCoordinates(ctx->plex[grid]));                                                                                          /* needed for periodic */
606       if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube"));
607       else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane"));
608     } else if (dim == 2) {
609       size_t len;
610       PetscCall(PetscStrlen(ctx->filename, &len));
611       if (len) {
612         Vec          coords;
613         PetscScalar *x;
614         PetscInt     N;
615         char         str[] = "-dm_landau_view_file_0";
616         str[21] += grid;
617         PetscCall(DMPlexCreateFromFile(comm_self, ctx->filename, "plexland.c", PETSC_TRUE, &ctx->plex[grid]));
618         PetscCall(DMPlexOrient(ctx->plex[grid]));
619         PetscCall(DMGetCoordinatesLocal(ctx->plex[grid], &coords));
620         PetscCall(VecGetSize(coords, &N));
621         PetscCall(VecGetArray(coords, &x));
622         /* scale by domain size */
623         for (PetscInt i = 0; i < N; i += 2) {
624           x[i + 0] *= ctx->radius_perp[grid];
625           x[i + 1] *= ctx->radius_par[grid];
626         }
627         PetscCall(VecRestoreArray(coords, &x));
628         PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], ctx->filename));
629         PetscCall(PetscInfo(ctx->plex[grid], "%" PetscInt_FMT ") Read %s mesh file (%s)\n", grid, ctx->filename, str));
630         PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, str));
631       } else { // simplex forces a sphere
632         PetscInt       numCells = ctx->simplex ? 12 : 6, cell_size = ctx->simplex ? 3 : 4, j;
633         const PetscInt numVerts    = 11;
634         PetscInt       cellsT[][4] = {
635           {0,  1, 6, 5 },
636           {1,  2, 7, 6 },
637           {2,  3, 8, 7 },
638           {3,  4, 9, 8 },
639           {5,  6, 7, 10},
640           {10, 7, 8, 9 }
641         };
642         PetscInt cellsS[][3] = {
643           {0,  1, 6 },
644           {1,  2, 6 },
645           {6,  2, 7 },
646           {7,  2, 8 },
647           {8,  2, 3 },
648           {8,  3, 4 },
649           {0,  6, 5 },
650           {5,  6, 7 },
651           {5,  7, 10},
652           {10, 7, 9 },
653           {9,  7, 8 },
654           {9,  8, 4 }
655         };
656         const PetscInt *pcell = (const PetscInt *)(ctx->simplex ? &cellsS[0][0] : &cellsT[0][0]);
657         PetscReal       coords[11][2], *flatCoords = &coords[0][0];
658         PetscReal       rad = ctx->radius[grid];
659         for (j = 0; j < 5; j++) { // outside edge
660           PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4;
661           r            = rad * PetscCosReal(theta);
662           coords[j][0] = r;
663           z            = rad * PetscSinReal(theta);
664           coords[j][1] = z;
665         }
666         coords[j][0]   = 0;
667         coords[j++][1] = -rad * ctx->sphere_inner_radius_90degree[grid];
668         coords[j][0]   = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
669         coords[j++][1] = -rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
670         coords[j][0]   = rad * ctx->sphere_inner_radius_90degree[grid];
671         coords[j++][1] = 0;
672         coords[j][0]   = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
673         coords[j++][1] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
674         coords[j][0]   = 0;
675         coords[j++][1] = rad * ctx->sphere_inner_radius_90degree[grid];
676         coords[j][0]   = 0;
677         coords[j++][1] = 0;
678         PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, cell_size, ctx->interpolate, pcell, 2, flatCoords, &ctx->plex[grid]));
679         PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle"));
680         PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make circle %s mesh\n", grid, ctx->simplex ? "simplex" : "tensor"));
681       }
682     } else {
683       PetscCheck(dim == 3 && ctx->sphere && !ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "not: dim == 3 && ctx->sphere && !ctx->simplex");
684       PetscReal      rad = ctx->radius[grid], inner_rad = rad * ctx->sphere_inner_radius_90degree[grid], outer_rad = rad;
685       const PetscInt numCells = 7, cell_size = 8, numVerts = 16;
686       const PetscInt cells[][8] = {
687         {0, 3, 2, 1, 4,  5,  6,  7 },
688         {0, 4, 5, 1, 8,  9,  13, 12},
689         {1, 5, 6, 2, 9,  10, 14, 13},
690         {2, 6, 7, 3, 10, 11, 15, 14},
691         {0, 3, 7, 4, 8,  12, 15, 11},
692         {0, 1, 2, 3, 8,  11, 10, 9 },
693         {4, 7, 6, 5, 12, 13, 14, 15}
694       };
695       PetscReal coords[16 /* numVerts */][3];
696       for (PetscInt j = 0; j < 4; j++) { // inner edge, low
697         coords[j][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1);
698         coords[j][1] = inner_rad * (j / 2 < 1 ? 1 : -1);
699         coords[j][2] = inner_rad * -1;
700       }
701       for (PetscInt j = 0, jj = 4; j < 4; j++, jj++) { // inner edge, hi
702         coords[jj][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1);
703         coords[jj][1] = inner_rad * (j / 2 < 1 ? 1 : -1);
704         coords[jj][2] = inner_rad * 1;
705       }
706       for (PetscInt j = 0, jj = 8; j < 4; j++, jj++) { // outer edge, low
707         coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1);
708         coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1);
709         coords[jj][2] = outer_rad * -1;
710       }
711       for (PetscInt j = 0, jj = 12; j < 4; j++, jj++) { // outer edge, hi
712         coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1);
713         coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1);
714         coords[jj][2] = outer_rad * 1;
715       }
716       PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 3, numCells, numVerts, cell_size, ctx->interpolate, (const PetscInt *)cells, 3, (const PetscReal *)coords, &ctx->plex[grid]));
717       PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cubed sphere"));
718       PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make cubed sphere %s mesh\n", grid, ctx->simplex ? "simplex" : "tensor"));
719     }
720     PetscCall(DMSetOptionsPrefix(ctx->plex[grid], prefix));
721     PetscCall(DMSetFromOptions(ctx->plex[grid]));
722   } // grid loop
723   PetscCall(DMSetOptionsPrefix(pack, prefix));
724   { /* convert to p4est (or whatever), wait for discretization to create pack */
725     char      convType[256];
726     PetscBool flg;
727 
728     PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
729     PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg));
730     PetscOptionsEnd();
731     if (flg) {
732       ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
733       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
734         DM        dmforest;
735         PetscBool isForest;
736 
737         PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest));
738         PetscCheck(dmforest, ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
739         PetscCall(DMSetOptionsPrefix(dmforest, prefix));
740         PetscCall(DMIsForest(dmforest, &isForest));
741         PetscCheck(isForest, ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
742         PetscCall(DMDestroy(&ctx->plex[grid]));
743         ctx->plex[grid] = dmforest; // Forest for adaptivity
744       }
745     } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
746   }
747   PetscCall(DMSetDimension(pack, dim));
748   PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh"));
749   PetscCall(DMSetApplicationContext(pack, ctx));
750   PetscFunctionReturn(PETSC_SUCCESS);
751 }
752 
753 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, const char prefix[], LandauCtx *ctx)
754 {
755   PetscInt     ii, i0;
756   char         buf[256];
757   PetscSection section;
758 
759   PetscFunctionBegin;
760   for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
761     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
762     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
763     /* Setup Discretization - FEM */
764     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, prefix, PETSC_DECIDE, &ctx->fe[ii]));
765     PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf));
766     PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii]));
767   }
768   PetscCall(DMCreateDS(ctx->plex[grid]));
769   PetscCall(DMGetLocalSection(ctx->plex[grid], &section));
770   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
771     if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
772     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
773     PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
774   }
775   PetscFunctionReturn(PETSC_SUCCESS);
776 }
777 
778 /* Define a Maxwellian function for testing out the operator. */
779 
780 /* Using cartesian velocity space coordinates, the particle */
781 /* density, [1/m^3], is defined according to */
782 
783 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
784 
785 /* Using some constant, c, we normalize the velocity vector into a */
786 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
787 
788 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
789 
790 /* Defining $\theta=2T/mc^2$, we find that the probability density of */
791 /* finding the particle in a volume element dx^3 around x is */
792 
793 /* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */
794 
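/* In maxwellian() below the normalizing constant is c = v_0, so theta = 2*kT_m/v_0^2 with kT_m = kT/m. */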
795 typedef struct {
796   PetscReal v_0;
797   PetscReal kT_m;
798   PetscReal n;
799   PetscReal shift;
800 } MaxwellianCtx;
801 
802 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
803 {
804   MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
805   PetscInt       i;
806   PetscReal      v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0), shift; /* theta = 2kT/mc^2 */
807 
808   PetscFunctionBegin;
809   /* compute the exponents, v^2 */
810   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
811   /* evaluate the Maxwellian */
812   if (mctx->shift < 0) shift = -mctx->shift;
813   else {
814     u[0]  = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
815     shift = mctx->shift;
816   }
817   if (shift != 0.) {
818     v2 = 0;
819     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
820     v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
821     /* evaluate the shifted Maxwellian */
822     u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
823   }
824   PetscFunctionReturn(PETSC_SUCCESS);
825 }
826 
827 /*@
828   DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
829 
830   Collective
831 
832   Input Parameters:
833 + dm      - The mesh (local)
834 . time    - Current time
835 . temps   - Temperatures of each species (global)
836 . ns      - Number density of each species (global)
837 . grid    - index into current grid - just used for offset into `temps` and `ns`
838 . b_id    - batch index
839 . n_batch - number of batches
840 - actx    - Landau context
841 
842   Output Parameter:
843 . X - The state (local to this grid)
844 
845   Level: beginner
846 
847 .seealso: `DMPlexLandauCreateVelocitySpace()`
848  @*/
849 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
850 {
851   LandauCtx *ctx = (LandauCtx *)actx;
852   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
853   PetscInt       dim;
854   MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
855 
856   PetscFunctionBegin;
857   PetscCall(DMGetDimension(dm, &dim));
858   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
859   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
860     mctxs[i0]      = &data[i0];
861     data[i0].v_0   = ctx->v_0;                             // v_0 same for all grids
862     data[i0].kT_m  = ctx->k * temps[ii] / ctx->masses[ii]; /* kT/m */
863     data[i0].n     = ns[ii];
864     initu[i0]      = maxwellian;
865     data[i0].shift = 0;
866   }
867   data[0].shift = ctx->electronShift;
868   /* need to make ADD_ALL_VALUES work - TODO */
869   PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X));
870   PetscFunctionReturn(PETSC_SUCCESS);
871 }
872 
873 /*
874  LandauSetInitialCondition - Adds Maxwellians with context
875 
876  Collective
877 
878  Input Parameters:
879  +   dm - The mesh
880  .   grid - index into current grid - just used for offset into temps and ns
881  .   b_id - batch index
882  .   n_batch - number of batches
883  -   actx - Landau context with T and n
884 
885  Output Parameter:
886  .   X  - The state
887 
888  Level: beginner
889 
890 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
891  */
892 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
893 {
894   LandauCtx *ctx = (LandauCtx *)actx;
895 
896   PetscFunctionBegin;
897   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
898   PetscCall(VecZeroEntries(X));
899   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
900   PetscFunctionReturn(PETSC_SUCCESS);
901 }
902 
903 // adapt a level once. Forest in/out
904 #if defined(PETSC_USE_INFO)
905 static const char *s_refine_names[] = {"RE", "Z1", "Origin", "Z2", "Uniform"};
906 #endif
907 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
908 {
909   DM              forest, plex, adaptedDM = NULL;
910   PetscDS         prob;
911   PetscBool       isForest;
912   PetscQuadrature quad;
913   PetscInt        Nq, Nb, *Nb2, cStart, cEnd, c, dim, qj, k;
914   DMLabel         adaptLabel = NULL;
915 
916   PetscFunctionBegin;
917   forest = ctx->plex[grid];
918   PetscCall(DMCreateDS(forest));
919   PetscCall(DMGetDS(forest, &prob));
920   PetscCall(DMGetDimension(forest, &dim));
921   PetscCall(DMIsForest(forest, &isForest));
922   PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest");
923   PetscCall(DMConvert(forest, DMPLEX, &plex));
924   PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
925   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel));
926   PetscCall(PetscFEGetQuadrature(fem, &quad));
927   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
928   PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
929   PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
930   PetscCall(PetscDSGetDimensions(prob, &Nb2));
931   PetscCheck(Nb2[0] == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, " Nb = %" PetscInt_FMT " != Nb (%" PetscInt_FMT ")", Nb, Nb2[0]);
932   PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
933   PetscCall(PetscInfo(sol, "%" PetscInt_FMT ") Refine phase: %s\n", grid, s_refine_names[type]));
934   if (type == 4) {
935     for (c = cStart; c < cEnd; c++) PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
936   } else if (type == 2) {
937     PetscInt  rCellIdx[8], nr = 0, nrmax = (dim == 3) ? 8 : 2;
938     PetscReal minRad = PETSC_INFINITY, r;
939     for (c = cStart; c < cEnd; c++) {
940       PetscReal tt, v0[LANDAU_MAX_NQND * 3], J[LANDAU_MAX_NQND * 9], invJ[LANDAU_MAX_NQND * 9], detJ[LANDAU_MAX_NQND];
941       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, J, invJ, detJ));
942       (void)J;
943       (void)invJ;
944       for (qj = 0; qj < Nq; ++qj) {
945         tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr((dim == 3) ? v0[dim * qj + 2] : 0);
946         r  = PetscSqrtReal(tt);
947         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
948           minRad         = r;
949           nr             = 0;
950           rCellIdx[nr++] = c;
951           PetscCall(PetscInfo(sol, "\t\t%" PetscInt_FMT ") Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", grid, (double)r, c, qj + 1, Nq));
952         } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
953           for (k = 0; k < nr; k++)
954             if (c == rCellIdx[k]) break;
955           if (k == nr) {
956             rCellIdx[nr++] = c;
957             PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", grid, (double)r, c, qj + 1, Nq, (double)(r - minRad)));
958           }
959         }
960       }
961     }
962     for (k = 0; k < nr; k++) PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
963     PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", grid, nr, rCellIdx[0], rCellIdx[1], (double)minRad));
964   } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
965     PetscScalar *coef = NULL;
966     Vec          coords;
967     PetscInt     csize, Nv, d, nz, nrefined = 0;
968     DM           cdm;
969     PetscSection cs;
970     PetscCall(DMGetCoordinatesLocal(forest, &coords));
971     PetscCall(DMGetCoordinateDM(forest, &cdm));
972     PetscCall(DMGetLocalSection(cdm, &cs));
973     for (c = cStart; c < cEnd; c++) {
974       PetscInt doit = 0, outside = 0;
975       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
976       Nv = csize / dim;
977       for (nz = d = 0; d < Nv; d++) {
978         PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
979         x = PetscSqrtReal(x);
980         if (type == 0) {
981           if (ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
982         } else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) {
983           outside++; /* don't refine outside electron refine radius */
984           PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
985         } else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) {
986           outside++; /* refine r=0 cells on refinement front */
987           PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
988         }
989         if (x < PETSC_MACHINE_EPSILON * 10. && (type != 0 || ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON)) nz++;
990       }
991       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
992       if (doit || (outside < Nv && nz)) {
993         PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
994         nrefined++;
995       }
996     }
997     PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " cells\n", grid, nrefined));
998   }
999   PetscCall(DMDestroy(&plex));
1000   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
1001   PetscCall(DMLabelDestroy(&adaptLabel));
1002   *newForest = adaptedDM;
1003   if (adaptedDM) {
1004     if (isForest) PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // ????
1005     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
1006     PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
1007     PetscCall(PetscInfo(sol, "\t\t\t\t%" PetscInt_FMT ") %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", grid, cEnd - cStart, Nq * (cEnd - cStart)));
1008     PetscCall(DMDestroy(&plex));
1009   } else *newForest = NULL;
1010   PetscFunctionReturn(PETSC_SUCCESS);
1011 }
1012 
1013 // forest goes in (ctx->plex[grid]), plex comes out
1014 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1015 {
1016   PetscInt adaptIter;
1017 
1018   PetscFunctionBegin;
1019   PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
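  // Phases follow s_refine_names: 0) runaway-electron (RE) region, 1) first r=0 (Z1) refinement, 2) origin
  // AMR, 3) second r=0 (Z2) refinement, 4) uniform post-AMR refinement; limits[] is the number of passes
  // for each phase on this grid (the RE and Z phases apply to the electron grid 0 only).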
1020   for (type = 0; type < 5; type++) {
1021     for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
1022       DM newForest = NULL;
1023       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
1024       if (newForest) {
1025         PetscCall(DMDestroy(&ctx->plex[grid]));
1026         PetscCall(VecDestroy(uu));
1027         PetscCall(DMCreateGlobalVector(newForest, uu));
1028         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
1029         ctx->plex[grid] = newForest;
1030       } else {
1031         PetscCall(PetscInfo(*uu, "No refinement\n"));
1032       }
1033     }
1034   }
1035   PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR"));
1036   PetscFunctionReturn(PETSC_SUCCESS);
1037 }
1038 
1039 // make log(Lambdas) from NRL Plasma formulary
1040 static PetscErrorCode makeLambdas(LandauCtx *ctx)
1041 {
1042   PetscFunctionBegin;
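  /* A sketch of the NRL Plasma Formulary Coulomb logarithms implemented below (temperatures in eV;
     the constants 23.5, 23 and 24 assume the formulary's density units, with n scaled by n_0 here):
       lam_ee  = 23.5 - ln(sqrt(n_e) T_e^(-5/4)) - sqrt(1e-5 + (ln(T_e) - 2)^2/16)
       lam_ei  = 23 - ln(sqrt(n_e) Z T_e^(-3/2))    when T_e < 10 Z^2 eV
               = 24 - ln(sqrt(n_e) / T_e)           otherwise
       lam_ii' = 23 - ln(Z Z' (mu + mu') / (mu T'_ev + mu' T_ev) sqrt(n Z^2 / T_ev + n' Z'^2 / T'_ev)) */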
1043   for (PetscInt gridi = 0; gridi < ctx->num_grids; gridi++) {
1044     PetscInt  iii   = ctx->species_offset[gridi];
1045     PetscReal Ti_ev = (ctx->thermal_temps[iii] / 1.1604525e7) * 1000; // convert (back) to eV
1046     PetscReal ni    = ctx->n[iii] * ctx->n_0;
1047     for (PetscInt gridj = gridi; gridj < ctx->num_grids; gridj++) {
1048       PetscInt  jjj = ctx->species_offset[gridj];
1049       PetscReal Zj  = ctx->charges[jjj] / 1.6022e-19;
1050       if (gridi == 0) {
1051         if (gridj == 0) { // lam_ee
1052           ctx->lambdas[gridi][gridj] = 23.5 - PetscLogReal(PetscSqrtReal(ni) * PetscPowReal(Ti_ev, -1.25)) - PetscSqrtReal(1e-5 + PetscSqr(PetscLogReal(Ti_ev) - 2) / 16);
1053         } else { // lam_ei == lam_ie
1054           if (10 * Zj * Zj > Ti_ev) {
1055             ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(PetscSqrtReal(ni) * Zj * PetscPowReal(Ti_ev, -1.5));
1056           } else {
1057             ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 24 - PetscLogReal(PetscSqrtReal(ni) / Ti_ev);
1058           }
1059         }
1060       } else { // lam_ii'
1061         PetscReal mui = ctx->masses[iii] / 1.6720e-27, Zi = ctx->charges[iii] / 1.6022e-19;
1062         PetscReal Tj_ev            = (ctx->thermal_temps[jjj] / 1.1604525e7) * 1000; // convert (back) to eV
1063         PetscReal muj              = ctx->masses[jjj] / 1.6720e-27;
1064         PetscReal nj               = ctx->n[jjj] * ctx->n_0;
1065         ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(Zi * Zj * (mui + muj) / (mui * Tj_ev + muj * Ti_ev) * PetscSqrtReal(ni * Zi * Zi / Ti_ev + nj * Zj * Zj / Tj_ev));
1066       }
1067     }
1068   }
1069   //PetscReal v0 = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
1070   PetscFunctionReturn(PETSC_SUCCESS);
1071 }
1072 
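/*
  A hypothetical minimal set of runtime options for a two-grid electron + deuterium run, using only
  options processed below (-dm_landau_thermal_temps is required and sets the number of species):

    -dm_landau_thermal_temps 2,2 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
    -dm_landau_n 1,1 -dm_landau_num_species_grid 1,1 -dm_landau_amr_levels_max 2,2
    -dm_landau_device_type cpu
*/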
1073 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1074 {
1075   PetscBool flg, fileflg;
1076   PetscInt  ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS], non_dim_grid;
1077   PetscReal lnLam = 10;
1078   DM        dummy;
1079 
1080   PetscFunctionBegin;
1081   PetscCall(DMCreate(ctx->comm, &dummy));
1082   /* get options - initialize context */
1083   ctx->verbose        = 1; // set to 0 for fully silent operation
1084   ctx->batch_sz       = 1;
1085   ctx->batch_view_idx = 0;
1086   ctx->interpolate    = PETSC_TRUE;
1087   ctx->gpu_assembly   = PETSC_TRUE;
1088   ctx->norm_state     = 0;
1089   ctx->electronShift  = 0;
1090   ctx->M              = NULL;
1091   ctx->J              = NULL;
1092   /* geometry and grids */
1093   ctx->sphere     = PETSC_FALSE;
1094   ctx->map_sphere = PETSC_TRUE;
1095   ctx->use_p4est  = PETSC_FALSE;
1096   ctx->simplex    = PETSC_FALSE;
1097   for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1098     ctx->radius[grid]             = 5.; /* thermal radius (velocity) */
1099     ctx->radius_perp[grid]        = 5.; /* thermal radius (velocity) */
1100     ctx->radius_par[grid]         = 5.; /* thermal radius (velocity) */
1101     ctx->numAMRRefine[grid]       = 0;
1102     ctx->postAMRRefine[grid]      = 0;
1103     ctx->species_offset[grid + 1] = 1; // one species default
1104     num_species_grid[grid]        = 0;
1105     ctx->plex[grid]               = NULL; /* cache since DMConvert is expensive */
1106   }
1107   ctx->species_offset[0] = 0;
1108   ctx->re_radius         = 0.;
1109   ctx->vperp0_radius1    = 0;
1110   ctx->vperp0_radius2    = 0;
1111   ctx->nZRefine1         = 0;
1112   ctx->nZRefine2         = 0;
1113   ctx->numRERefine       = 0;
1114   num_species_grid[0]    = 1; // one species default
1115   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1116   ctx->charges[0]       = -1;                       /* electron charge (MKS) */
1117   ctx->masses[0]        = 1 / 1835.469965278441013; /* electron mass in proton-mass units; converted to kg below */
1118   ctx->n[0]             = 1;
1119   ctx->v_0              = 1; /* thermal velocity, we could start with a scale != 1 */
1120   ctx->thermal_temps[0] = 1;
1121   /* constants, etc. */
1122   ctx->epsilon0 = 8.8542e-12;     /* permittivity of free space (MKS) F/m */
1123   ctx->k        = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1124   ctx->n_0      = 1.e20;          /* typical plasma n, but could set it to 1 */
1125   ctx->Ez       = 0;
1126   for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
1127   for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2;
1128   if (LANDAU_DIM == 2) ctx->cells0[0] = 1;
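  // note: in 2D the domain is the cylindrical (r >= 0, z) half-plane, so the default base grid uses half the cells in r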
1129   ctx->use_matrix_mass                = PETSC_FALSE;
1130   ctx->use_relativistic_corrections   = PETSC_FALSE;
1131   ctx->use_energy_tensor_trick        = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1132   ctx->SData_d.w                      = NULL;
1133   ctx->SData_d.x                      = NULL;
1134   ctx->SData_d.y                      = NULL;
1135   ctx->SData_d.z                      = NULL;
1136   ctx->SData_d.invJ                   = NULL;
1137   ctx->jacobian_field_major_order     = PETSC_FALSE;
1138   ctx->SData_d.coo_elem_offsets       = NULL;
1139   ctx->SData_d.coo_elem_point_offsets = NULL;
1140   ctx->SData_d.coo_elem_fullNb        = NULL;
1141   ctx->SData_d.coo_size               = 0;
1142   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1143 #if defined(PETSC_HAVE_KOKKOS)
1144   ctx->deviceType = LANDAU_KOKKOS;
1145   PetscCall(PetscStrncpy(ctx->filename, "kokkos", sizeof(ctx->filename)));
1146 #else
1147   ctx->deviceType = LANDAU_CPU;
1148   PetscCall(PetscStrncpy(ctx->filename, "cpu", sizeof(ctx->filename)));
1149 #endif
1150   PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu' or 'kokkos'", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), NULL));
1151   PetscCall(PetscStrcmp("cpu", ctx->filename, &flg));
1152   if (flg) {
1153     ctx->deviceType = LANDAU_CPU;
1154   } else {
1155     PetscCall(PetscStrcmp("kokkos", ctx->filename, &flg));
1156     PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", ctx->filename);
1157     ctx->deviceType = LANDAU_KOKKOS;
1158   }
1159   ctx->filename[0] = '\0'; // reset; the buffer was borrowed above to parse -dm_landau_device_type
1160   PetscCall(PetscOptionsString("-dm_landau_filename", "file to read mesh from", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), &fileflg));
1161   PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL));
1162   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1163   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1164   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %d < ctx->batch_sz %" PetscInt_FMT, LANDAU_MAX_BATCH_SZ, ctx->batch_sz);
1165   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1166   PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_batch_view_idx %" PetscInt_FMT " >= ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz);
1167   PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in units of the Connor-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL));
1168   PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL));
1169   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MatAXPY to add the mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1170   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1171   PetscCall(PetscOptionsBool("-dm_landau_simplex", "Use simplex elements", "plexland.c", ctx->simplex, &ctx->simplex, NULL));
1172   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, NULL));
1173   PetscCall(PetscOptionsBool("-dm_landau_map_sphere", "Map to sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->map_sphere, &ctx->map_sphere, NULL));
1174   if (LANDAU_DIM == 2 && ctx->use_relativistic_corrections) ctx->use_relativistic_corrections = PETSC_FALSE; // should warn
1175   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
1176                              &ctx->use_energy_tensor_trick, NULL));
1177 
1178   /* get num species with temperature, set defaults */
1179   for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
1180     ctx->thermal_temps[ii] = 1;
1181     ctx->charges[ii]       = 1;
1182     ctx->masses[ii]        = 1;
1183     ctx->n[ii]             = 1;
1184   }
1185   nt = LANDAU_MAX_SPECIES;
1186   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1187   PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1188   PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt));
1189   ctx->num_species = nt;
1190   for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1191   nm = LANDAU_MAX_SPECIES - 1;
1192   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1193   PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1);
1194   nm = LANDAU_MAX_SPECIES;
1195   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1196   PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species);
1197   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1198   ctx->masses[0] = 9.10938356e-31;                                           /* electron mass in kg (overrides the proton-mass-unit default above) */
1199   nc             = LANDAU_MAX_SPECIES - 1;
1200   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1201   if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1);
1202   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1203   /* geometry and grids */
1204   nt = LANDAU_MAX_GRIDS;
1205   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg));
1206   if (flg) {
1207     ctx->num_grids = nt;
1208     for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii];
1209     PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species,
1210                ctx->num_grids, LANDAU_MAX_GRIDS);
1211   } else {
1212     if (ctx->num_species > LANDAU_MAX_GRIDS) {
1213       num_species_grid[0] = 1;
1214       num_species_grid[1] = ctx->num_species - 1;
1215       ctx->num_grids      = 2;
1216     } else {
1217       ctx->num_grids = ctx->num_species;
1218       for (ii = 0; ii < ctx->num_grids; ii++) num_species_grid[ii] = 1;
1219     }
1220   }
1221   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii];
1222   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT, ctx->species_offset[ctx->num_grids],
1223              ctx->num_species);
1224   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1225     PetscInt iii             = ctx->species_offset[grid];                                          // normalize with first (arbitrary) species on grid
1226     ctx->thermal_speed[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
1227   }
1228   // get lambdas here because we need them for t_0 etc
1229   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Universal cross section parameter. Default uses NRL formulas", "plexland.c", lnLam, &lnLam, &flg));
1230   if (flg) {
1231     for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1232       for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) ctx->lambdas[gridj][grid] = lnLam; /* ratio of large- to small-angle collision cross sections */
1233     }
1234   } else {
1235     PetscCall(makeLambdas(ctx));
1236   }
1237   non_dim_grid = 0;
1238   PetscCall(PetscOptionsInt("-dm_landau_normalization_grid", "Index of grid to use for setting v_0, m_0, t_0. (Not recommended)", "plexland.c", non_dim_grid, &non_dim_grid, &flg));
1239   if (non_dim_grid != 0) PetscCall(PetscInfo(dummy, "Normalization grid set to %" PetscInt_FMT ", but non-default not well verified\n", non_dim_grid));
1240   PetscCheck(non_dim_grid >= 0 && non_dim_grid < ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "Normalization grid wrong: %" PetscInt_FMT, non_dim_grid);
1241   ctx->v_0 = ctx->thermal_speed[non_dim_grid]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1242   ctx->m_0 = ctx->masses[non_dim_grid];        /* arbitrary reference mass, electrons */
1243   ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[non_dim_grid])) / ctx->lambdas[non_dim_grid][non_dim_grid] / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[non_dim_grid,non_dim_grid]=1 */
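  /* written out: t_0 = 8 pi eps0^2 m_0^2 v_0^3 / (q_0^4 n_0 ln(Lambda)), with q_0 = charges[non_dim_grid] */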
1244   /* domain */
1245   nt = LANDAU_MAX_GRIDS;
1246   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg));
1247   if (flg) {
1248     PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radii < number of grids %" PetscInt_FMT, nt, ctx->num_grids);
1249     while (nt--) ctx->radius_par[nt] = ctx->radius_perp[nt] = ctx->radius[nt];
1250   } else {
1251     nt = LANDAU_MAX_GRIDS;
1252     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_par", "Parallel velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_par, &nt, &flg));
1253     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_par: given %" PetscInt_FMT " radii < number of grids %" PetscInt_FMT, nt, ctx->num_grids);
1254     PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_perp", "Perpendicular velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_perp, &nt, &flg));
1255     if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_perp: given %" PetscInt_FMT " radii < number of grids %" PetscInt_FMT, nt, ctx->num_grids);
1256   }
1257   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1258     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c - need to set par and perp with this -- todo */
1259       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1260       else ctx->radius[grid] = -ctx->radius[grid];
1261       ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1262       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid));
1263     }
1264     ctx->radius[grid] *= ctx->thermal_speed[grid] / ctx->v_0;      // scale domain by thermal radius relative to v_0
1265     ctx->radius_perp[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0
1266     ctx->radius_par[grid] *= ctx->thermal_speed[grid] / ctx->v_0;  // scale domain by thermal radius relative to v_0
1267   }
1268   /* amr parameters */
1269   if (!fileflg) {
1270     nt = LANDAU_MAX_GRIDS;
1271     PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1272     PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " values < number of grids %" PetscInt_FMT, nt, ctx->num_grids);
1273     nt = LANDAU_MAX_GRIDS;
1274     PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1275     for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1276     PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1277     PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_pre", "Number of levels to refine along v_perp=0 before origin refine", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1278     PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_post", "Number of levels to refine along v_perp=0 after origin refine", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1279     PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg));
1280     PetscCall(PetscOptionsReal("-dm_landau_z_radius_pre", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg));
1281     PetscCall(PetscOptionsReal("-dm_landau_z_radius_post", "velocity range to refine r=0 axis (for electrons) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1282     /* spherical domain */
1283     if (ctx->sphere || ctx->simplex) {
1284       ctx->sphere_uniform_normal = PETSC_FALSE;
1285       PetscCall(PetscOptionsBool("-dm_landau_sphere_uniform_normal", "Scaling of circle radius to get uniform particles per cell with Maxwellians (not used)", "plexland.c", ctx->sphere_uniform_normal, &ctx->sphere_uniform_normal, NULL));
1286       if (!ctx->sphere_uniform_normal) { // taken by default; sphere_uniform_normal is false unless set
1287         nt = LANDAU_MAX_GRIDS;
1288         PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_90degree_scale", "Scaling of radius for inner circle on 90 degree grid", "plexland.c", ctx->sphere_inner_radius_90degree, &nt, &flg));
1289         if (flg && nt < ctx->num_grids) {
1290           for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = ctx->sphere_inner_radius_90degree[0];
1291         } else if (!flg || nt == 0) {
1292           if (ctx->sphere && !ctx->simplex && LANDAU_DIM == 3) {
1293             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.35; // optimized for R=6, Q4, AMR=0, 0 refinement
1294           } else {
1295             if (LANDAU_DIM == 2) {
1296               for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.4; // optimized for R=5, Q4, AMR=0
1297             } else {
1298               for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.577 * 0.40;
1299             }
1300           }
1301         }
1302         nt = LANDAU_MAX_GRIDS;
1303         PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_45degree_scale", "Scaling of radius for inner circle on 45 degree grid", "plexland.c", ctx->sphere_inner_radius_45degree, &nt, &flg));
1304         if (flg && nt < ctx->num_grids) {
1305           for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = ctx->sphere_inner_radius_45degree[0];
1306         } else if (!flg || nt == 0) {
1307           if (LANDAU_DIM == 2) {
1308             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.45; // optimized for R=5, Q4, AMR=0
1309           } else {
1310             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.4; // 3D sphere
1311           }
1312         }
1313         if (ctx->sphere) PetscCall(PetscInfo(ctx->plex[0], "sphere: 45 degree scaling = %g; 90 degree scaling = %g\n", (double)ctx->sphere_inner_radius_45degree[0], (double)ctx->sphere_inner_radius_90degree[0]));
1314       } else {
1315         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1316           switch (ctx->numAMRRefine[grid]) {
1317           case 0:
1318           case 1:
1319           case 2:
1320           case 3:
1321           default:
1322             if (LANDAU_DIM == 2) {
1323               ctx->sphere_inner_radius_90degree[grid] = 0.40;
1324               ctx->sphere_inner_radius_45degree[grid] = 0.45;
1325             } else {
1326               ctx->sphere_inner_radius_45degree[grid] = 0.25;
1327             }
1328           }
1329         }
1330       }
1331     } else {
1332       nt = LANDAU_DIM;
1333       PetscCall(PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg));
1334     }
1335   }
1336   /* processing options */
1337   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1338   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1339   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1340   PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1341   PetscOptionsEnd();
1342 
1343   for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1344   if (ctx->verbose != 0) {
1345     PetscReal pmassunit = PetscRealConstant(1.6720e-27);
1346 
1347     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / pmassunit), (double)(ctx->num_species > 2 ? ctx->masses[2] / pmassunit : 0)));
1348     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0)));
1349     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0)));
1350     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "thermal T (K): e=%10.3e i=%10.3e %10.3e. Normalization grid %" PetscInt_FMT ": v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e %" PetscInt_FMT " batched, view batch %" PetscInt_FMT "\n", (double)ctx->thermal_temps[0],
1351                           (double)ctx->thermal_temps[1], (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), non_dim_grid, (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->batch_sz, ctx->batch_view_idx));
1352     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Domain radius (AMR levels) grid %d: par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius_par[0], (double)ctx->radius_perp[0], ctx->numAMRRefine[0]));
1353     for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, ", %" PetscInt_FMT ": par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius_par[ii], (double)ctx->radius_perp[ii], ctx->numAMRRefine[ii]));
1354     if (ctx->use_relativistic_corrections) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nUse relativistic corrections\n"));
1355     else PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1356   }
1357   PetscCall(DMDestroy(&dummy));
1358   {
1359     PetscMPIInt rank;
1360     PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
1361     ctx->stage = 0;
1362     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]));   /* 13 */
1363     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]));  /* 2 */
1364     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]));   /* 12 */
1365     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]));  /* 15 */
1366     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1367     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1368     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]));  /* 0 */
1369     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]));      /* 9 */
1370     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]));       /* 10 */
1371     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]));  /* 7 */
1372     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]));  /* 1 */
1373     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]));     /* 3 */
1374     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]));  /* 8 */
1375     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]));  /* 4 */
1376     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1377     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]));     /* 5 */
1378     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]));    /* 6 */
1379 
1380     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1381       PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason"));
1382       PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
1383       PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor"));
1384       PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor"));
1385       PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor"));
1386       PetscCall(PetscOptionsClearValue(NULL, "-ts_view"));
1387       PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor"));
1388       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view"));
1389       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view"));
1390       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view"));
1391       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view"));
1392       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view"));
1393       PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view"));
1394       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason"));
1395       PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor"));
1396       PetscCall(PetscOptionsClearValue(NULL, "-"));
1397       PetscCall(PetscOptionsClearValue(NULL, "-info"));
1398     }
1399   }
1400   PetscFunctionReturn(PETSC_SUCCESS);
1401 }
1402 
1403 static PetscErrorCode CreateStaticData(PetscInt dim, IS grid_batch_is_inv[], const char prefix[], LandauCtx *ctx)
1404 {
1405   PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
1406   PetscQuadrature  quad;
1407   const PetscReal *quadWeights;
1408   PetscReal        invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1409   PetscInt         numCells[LANDAU_MAX_GRIDS], Nq, Nb, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES;
1410   PetscTabulation *Tf;
1411   PetscDS          prob;
1412 
1413   PetscFunctionBegin;
1414   PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
1415   PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
1416   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1417     for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) {
1418       invMass[ii]  = ctx->m_0 / ctx->masses[ii];
1419       nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii];
1420       nu_beta[ii]  = PetscSqr(ctx->charges[ii] / ctx->epsilon0) / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
1421     }
1422   }
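  /* Collision prefactors: nu_alpha[i] = q_i^2 / (m_i m_0) and nu_beta[j] = q_j^2 t_0 n_0 / (8 pi eps0^2 v_0^3);
     species pair (i, j) is weighted by nu_alpha[i] * nu_beta[j] * lambdas[grid(i)][grid(j)], the product
     printed below with -dm_landau_verbose 4 */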
1423   if (ctx->verbose == 4) {
1424     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nu_alpha: "));
1425     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1426       PetscInt iii = ctx->species_offset[grid];
1427       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_alpha[ii]));
1428     }
1429     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_beta: "));
1430     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1431       PetscInt iii = ctx->species_offset[grid];
1432       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_beta[ii]));
1433     }
1434     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_alpha[i]*nu_beta[j]*lambda[i][j]:\n"));
1435     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1436       PetscInt iii = ctx->species_offset[grid];
1437       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1438         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1439           PetscInt jjj = ctx->species_offset[gridj];
1440           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)(nu_alpha[ii] * nu_beta[jj] * ctx->lambdas[grid][gridj])));
1441         }
1442         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1443       }
1444     }
1445     PetscCall(PetscPrintf(PETSC_COMM_WORLD, "lambda[i][j]:\n"));
1446     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1447       PetscInt iii = ctx->species_offset[grid];
1448       for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) {
1449         for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) {
1450           PetscInt jjj = ctx->species_offset[gridj];
1451           for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)ctx->lambdas[grid][gridj]));
1452         }
1453         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
1454       }
1455     }
1456   }
1457   PetscCall(DMGetDS(ctx->plex[0], &prob));    // same DS for all grids
1458   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1459   /* DS, Tab and quad is same on all grids */
1460   PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1461   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1462   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights));
1463   PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
1464   /* setup each grid */
1465   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1466     PetscInt cStart, cEnd;
1467     PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
1468     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1469     numCells[grid] = cEnd - cStart; // grids can have different topology
1470     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1471     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1472     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1473     ncellsTot += numCells[grid];
1474   }
1475   /* create GPU assembly data */
1476   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1477     PetscContainer container;
1478     PetscScalar   *elemMatrix, *elMat;
1479     pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE];
1480     P4estVertexMaps *maps;
1481     const PetscInt  *plex_batch = NULL, elMatSz = Nb * Nb * ctx->num_species * ctx->num_species;
1482     LandauIdx       *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = NULL;
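    /* Index encoding used throughout: maps[grid].gIdx[elem][field][q] >= 0 is a plain global dof index,
       while a negative value -(idx + 1) selects row idx of maps[grid].c_maps, the list of (gid, scale)
       pairs giving the hanging-node interpolation for that constrained point */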
1483     /* create GPU assembly data */
1484     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1));
1485     PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0));
1486     PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps));
1487     PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps));
1488     PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix));
1489 
1490     {                                                                                                                             // setup COO assembly -- put COO metadata directly in ctx->SData_d
1491       PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1492       coo_elem_offsets[0] = 0;                                                                                                    // finish later
1493       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot));
1494       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1495       ctx->SData_d.coo_elem_offsets       = (void *)coo_elem_offsets;
1496       ctx->SData_d.coo_elem_fullNb        = (void *)coo_elem_fullNb;
1497       ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets;
1498     }
1499 
1500     ctx->SData_d.coo_max_fullnb = 0;
1501     for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1502       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nb;
1503       if (grid_batch_is_inv[grid]) PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1504       PetscCheck(!plex_batch, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED");
1505       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1506       // make maps
1507       maps[grid].d_self       = NULL;
1508       maps[grid].num_elements = numCells[grid];
1509       maps[grid].num_face     = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001);                 // Q, quadrature points per edge
1510       maps[grid].num_face     = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^(dim-1), quadrature points per face
1511       maps[grid].num_reduced  = 0;
1512       maps[grid].deviceType   = ctx->deviceType;
1513       maps[grid].numgrids     = ctx->num_grids;
1514       // count reduced and get
1515       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1516       for (PetscInt ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1517         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1518         for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1519           PetscInt fullNb = 0;
1520           for (PetscInt q = 0; q < Nb; ++q) {
1521             PetscInt     numindices, *indices;
1522             PetscScalar *valuesOrig = elMat = elemMatrix;
1523             PetscCall(PetscArrayzero(elMat, totDim * totDim));
1524             elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1;
1525             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat));
1526             if (ctx->simplex) {
1527               PetscCheck(numindices == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, "numindices != Nb numindices=%" PetscInt_FMT " Nb=%" PetscInt_FMT, numindices, Nb);
1528               for (PetscInt q = 0; q < numindices; ++q) maps[grid].gIdx[eidx][fieldA][q] = indices[q];
1529               fullNb++;
1530             } else {
1531               for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal (is this too complicated for simplices?)
1532                 if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) {
1533                   // found it
1534                   if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1535                     if (plex_batch) {
1536                       maps[grid].gIdx[eidx][fieldA][q] = plex_batch[indices[f]];
1537                     } else {
1538                       maps[grid].gIdx[eidx][fieldA][q] = indices[f];
1539                     }
1540                     fullNb++;
1541                   } else { // found a constraint
1542                     PetscInt       jj                = 0;
1543                     PetscReal      sum               = 0;
1544                     const PetscInt ff                = f;
1545                     maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1546                     PetscCheck(!ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "No constraints with simplex");
1547                     do {                                                                                              // constraints are continuous in Plex - exploit that here
1548                       PetscInt ii;                                                                                    // get 'scale'
1549                       for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1550                         if (ff + ii < numindices) {                                                                   // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1551                           pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]);
1552                         }
1553                       }
1554                       sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1555                       // get 'gid'
1556                       if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1557                       else {
1558                         if (plex_batch) {
1559                           pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1560                         } else {
1561                           pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1562                         }
1563                         fullNb++;
1564                       }
1565                     } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1566                     while (jj < maps[grid].num_face) {
1567                       pointMaps[maps[grid].num_reduced][jj].scale = 0;
1568                       pointMaps[maps[grid].num_reduced][jj].gid   = -1;
1569                       jj++;
1570                     }
1571                     if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug
1572                       PetscInt  d, f;
1573                       PetscReal tmp = 0;
1574                       PetscCall(
1575                         PetscPrintf(PETSC_COMM_SELF, "\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face));
1576                       for (d = 0, tmp = 0; d < numindices; ++d) {
1577                         if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") %3" PetscInt_FMT ": ", d, indices[d]));
1578                         for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]);
1579                         if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp));
1580                       }
1581                     }
1582                     maps[grid].num_reduced++;
1583                     PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %" PetscInt_FMT " > %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE);
1584                   }
1585                   break;
1586                 }
1587               }
1588             } // !simplex
1589             // cleanup
1590             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat));
1591             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat));
1592           }
1593           {                                                        // setup COO assembly
1594             coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid
1595             if (fieldA == 0) {                                     // cache full Nb for this element, on this grid per species
1596               coo_elem_fullNb[glb_elem_idx] = fullNb;
1597               if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1598             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT, coo_elem_fullNb[glb_elem_idx], fullNb);
1599           }
1600         } // field
1601       } // cell
1602       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1603       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1604       for (PetscInt ej = 0; ej < maps[grid].num_reduced; ++ej) {
1605         for (PetscInt q = 0; q < maps[grid].num_face; ++q) {
1606           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1607           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1608         }
1609       }
1610 #if defined(PETSC_HAVE_KOKKOS)
1611       if (ctx->deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, grid)); // implies Kokkos does
1612 #endif
1613       if (plex_batch) {
1614         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1615         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1616       }
1617     } /* grids */
1618     // finish COO
1619     { // setup COO assembly
1620       PetscInt *oor, *ooc;
1621       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz;
1622       PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc));
1623       for (PetscInt i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1;
1624       // get
1625       for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1626         for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1627           const PetscInt         fullNb           = coo_elem_fullNb[glb_elem_idx];
1628           const LandauIdx *const Idxs             = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should all be the same, and this is only for COO storage
1629           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1630           for (PetscInt f = 0, cnt2 = 0; f < Nb; f++) {
1631             PetscInt idx                                = Idxs[f];
1632             coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1633             if (idx >= 0) {
1634               cnt2++;
1635               coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1636             } else {
1637               idx = -idx - 1;
1638               for (PetscInt q = 0; q < maps[grid].num_face; q++) {
1639                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1640                 cnt2++;
1641                 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1642               }
1643             }
1644             PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %" PetscInt_FMT " < %" PetscInt_FMT, fullNb, cnt2);
1645           }
1646           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %" PetscInt_FMT " != fullNb=%" PetscInt_FMT, coo_elem_point_offsets[glb_elem_idx][Nb], fullNb);
1647         }
1648       }
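      /* COO slot layout, per batch vertex: elements in glb_elem_idx order; within an element, Nf[grid]
         species blocks of fullNb * fullNb entries; within a species block, point pair (f, g) fills an
         nr x nc sub-block starting at fullNb * coo_elem_point_offsets[e][f] + nr * coo_elem_point_offsets[e][g],
         which is how idx0 is formed below */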
1649       // set
1650       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1651         for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1652           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1653           for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1654             const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
1655             // set (i,j)
1656             for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1657               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1658               PetscInt               rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
1659               for (PetscInt f = 0; f < Nb; ++f) {
1660                 const PetscInt nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f];
1661                 if (nr == 1) rows[0] = Idxs[f];
1662                 else {
1663                   const PetscInt idx = -Idxs[f] - 1;
1664                   for (PetscInt q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid;
1665                 }
1666                 for (PetscInt g = 0; g < Nb; ++g) {
1667                   const PetscInt nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g];
1668                   if (nc == 1) cols[0] = Idxs[g];
1669                   else {
1670                     const PetscInt idx = -Idxs[g] - 1;
1671                     for (PetscInt q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid;
1672                   }
1673                   const PetscInt idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1674                   for (PetscInt q = 0, idx = idx0; q < nr; q++) {
1675                     for (PetscInt d = 0; d < nc; d++, idx++) {
1676                       oor[idx] = rows[q] + moffset;
1677                       ooc[idx] = cols[d] + moffset;
1678                     }
1679                   }
1680                 }
1681               }
1682             }
1683           } // cell
1684         } // grid
1685       } // batch
1686       PetscCall(MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc));
1687       PetscCall(PetscFree2(oor, ooc));
1688     }
1689     PetscCall(PetscFree(pointMaps));
1690     PetscCall(PetscFree(elemMatrix));
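    // attach the assembly maps to the Jacobian so the (device) assembly kernels can retrieve them;
    // LandauGPUMapsDestroy, set as the container destroy, frees the host and any Kokkos device copies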
1691     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1692     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1693     PetscCall(PetscContainerSetCtxDestroy(container, LandauGPUMapsDestroy));
1694     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container));
1695     PetscCall(PetscContainerDestroy(&container));
1696     PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0));
1697   } // end GPU assembly
1698   { /* create static point data, Jacobian called first, only one vertex copy */
1699     PetscReal *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a;
1700     PetscInt   outer_ipidx, outer_ej, grid, nip_glb = 0;
1701     PetscFE    fe;
1702     PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0));
1703     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1704     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid];
1705     /* collect f data, first time is for Jacobian, but make mass now */
1706     if (ctx->verbose != 0) {
1707       PetscInt ncells = 0, N;
1708       MatInfo  info;
1709       PetscCall(MatGetInfo(ctx->J, MAT_LOCAL, &info));
1710       PetscCall(MatGetSize(ctx->J, &N, NULL));
1711       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid];
1712       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT " nnz= %" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb,
1713                             ctx->num_species, Nb, dim, N, (PetscInt)info.nz_used));
1714     }
1715     PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a));
1716     if (dim == 3) PetscCall(PetscMalloc1(nip_glb, &zz));
1717     if (ctx->use_energy_tensor_trick) {
1718       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, prefix, PETSC_DECIDE, &fe));
1719       PetscCall(PetscObjectSetName((PetscObject)fe, "energy"));
1720     }
1721     /* init each grids static data - no batch */
1722     for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once)
1723       Vec          v2_2 = NULL;                                                    // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1724       PetscSection e_section;
1725       DM           dmEnergy;
1726       PetscInt     cStart, cEnd, ej;
1727 
1728       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1729       // prep energy trick, get v^2 / 2 vector
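      // the trick: project v^2/2 (or (gamma - 1) c^2 with relativistic corrections) into the FE space and
      // feed its FE gradient, rather than v itself, to the Landau tensor; grad(v^2/2) = v in the continuum,
      // and using the discrete gradient is what conserves the FE energy (-dm_landau_use_energy_tensor_trick)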
1730       if (ctx->use_energy_tensor_trick) {
1731         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1732         Vec        glob_v2;
1733         PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1734 
1735         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1736         PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy"));
1737         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1738         PetscCall(DMCreateDS(dmEnergy));
1739         PetscCall(DMGetLocalSection(dmEnergy, &e_section));
1740         PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2));
1741         PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick"));
1742         c2_0[0] = &data[0];
1743         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2));
1744         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1745         PetscCall(VecZeroEntries(v2_2)); /* zero everything so BC values need not be set */
1746         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1747         PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1748         PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view"));
1749         PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view"));
1750         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1751       }
1752       /* append part of the IP data for each grid */
1753       for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) {
1754         PetscScalar *coefs = NULL;
1755         PetscReal    vj[LANDAU_MAX_NQND * LANDAU_DIM], detJj[LANDAU_MAX_NQND], Jdummy[LANDAU_MAX_NQND * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1756         invJe = invJ_a + outer_ej * Nq * dim * dim;
1757         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj));
1758         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1759         /* create static point data */
1760         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1761           const PetscInt   gidx = outer_ipidx;
1762           const PetscReal *invJ = &invJe[qj * dim * dim];
1763           ww[gidx]              = detJj[qj] * quadWeights[qj];
1764           if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1765           // get xx, yy, zz
1766           if (ctx->use_energy_tensor_trick) {
1767             double                 refSpaceDer[3], eGradPhi[3];
1768             const PetscReal *const DD = Tf[0]->T[1];
1769             const PetscReal       *Dq = &DD[qj * Nb * dim];
1770             for (PetscInt d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1771             for (PetscInt b = 0; b < Nb; ++b) {
1772               for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]);
1773             }
1774             xx[gidx] = 1e10;
1775             if (ctx->use_relativistic_corrections) {
1776               double dg2_c2 = 0;
1777               //for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1778               for (PetscInt d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1779               dg2_c2 *= (double)c02;
1780               if (dg2_c2 >= .999) {
1781                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1782                 yy[gidx] = vj[qj * dim + 1];
1783                 if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1784                 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx]));
1785               } else {
1786                 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2);
1787                 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1788                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1789               }
1790             }
1791             if (xx[gidx] == 1e10) {
1792               for (PetscInt d = 0; d < dim; ++d) {
1793                 for (PetscInt e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e];
1794               }
1795               xx[gidx] = eGradPhi[0];
1796               yy[gidx] = eGradPhi[1];
1797               if (dim == 3) zz[gidx] = eGradPhi[2];
1798             }
1799           } else {
1800             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1801             yy[gidx] = vj[qj * dim + 1];
1802             if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1803           }
1804         } /* q */
1805         if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs));
1806       } /* ej */
1807       if (ctx->use_energy_tensor_trick) {
1808         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1809         PetscCall(DMDestroy(&dmEnergy));
1810       }
1811     } /* grid */
1812     if (ctx->use_energy_tensor_trick) PetscCall(PetscFEDestroy(&fe));
1813     /* cache static data */
1814     if (ctx->deviceType == LANDAU_KOKKOS) {
1815 #if defined(PETSC_HAVE_KOKKOS)
1816       PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1817       /* free */
1818       PetscCall(PetscFree4(ww, xx, yy, invJ_a));
1819       if (dim == 3) PetscCall(PetscFree(zz));
1820 #else
1821       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built");
1822 #endif
1823     } else { /* CPU version, just copy in, only use part */
1824       PetscReal *nu_alpha_p = NULL, *nu_beta_p = NULL, *invMass_p = NULL, *lambdas_p = NULL; // allocated below and stored in SData_d
1825       ctx->SData_d.w    = (void *)ww;
1826       ctx->SData_d.x    = (void *)xx;
1827       ctx->SData_d.y    = (void *)yy;
1828       ctx->SData_d.z    = (void *)zz;
1829       ctx->SData_d.invJ = (void *)invJ_a;
1830       PetscCall(PetscMalloc4(ctx->num_species, &nu_alpha_p, ctx->num_species, &nu_beta_p, ctx->num_species, &invMass_p, LANDAU_MAX_GRIDS * LANDAU_MAX_GRIDS, &lambdas_p));
1831       for (PetscInt ii = 0; ii < ctx->num_species; ii++) {
1832         nu_alpha_p[ii] = nu_alpha[ii];
1833         nu_beta_p[ii]  = nu_beta[ii];
1834         invMass_p[ii]  = invMass[ii];
1835       }
1836       ctx->SData_d.alpha   = (void *)nu_alpha_p;
1837       ctx->SData_d.beta    = (void *)nu_beta_p;
1838       ctx->SData_d.invMass = (void *)invMass_p;
1839       ctx->SData_d.lambdas = (void *)lambdas_p;
1840       for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1841         PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
1842         for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) (*lambdas)[grid][gridj] = ctx->lambdas[grid][gridj];
1843       }
1844     }
1845     PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0));
1846   } // initialize
1847   PetscFunctionReturn(PETSC_SUCCESS);
1848 }
1849 
1850 /* < v, u > */
1851 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1852 {
1853   g0[0] = 1.;
1854 }
1855 
1856 /* < v, u > */
1857 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1858 {
1859   static double ttt = 1e-12;
1860   g0[0]             = ttt++;
1861 }
1862 
1863 /* < v, u > */
1864 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1865 {
1866   g0[0] = 2. * PETSC_PI * x[0];
1867 }
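/* Added note (not in the original source): g0_1 is the plain <v, u> mass weak form used for the
   3D mass matrix, while g0_r folds in the cylindrical measure 2*pi*r for the 2D axisymmetric case
   (see the dim == 3 branch in DMPlexLandauCreateMassMatrix() below). g0_fake returns a slowly
   varying nonzero value, presumably so that the throwaway matrix assembled only to compute the
   RCM ordering in LandauCreateJacobianMatrix() below keeps its full nonzero pattern despite
   MAT_IGNORE_ZERO_ENTRIES. */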
1868 
1869 /*
1870  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep sparse.
1871   - Like DMPlexLandauCreateMassMatrix; the two should be combined into one routine
1872   - has old support for field-major ordering
1873  */
1874 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1875 {
1876   PetscInt *idxs = NULL;
1877   Mat       subM[LANDAU_MAX_GRIDS];
1878 
1879   PetscFunctionBegin;
1880   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1881     PetscFunctionReturn(PETSC_SUCCESS);
1882   }
1883   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used
1884   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs));
1885   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1886     const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid];
1887     Mat             gMat;
1888     DM              massDM;
1889     PetscDS         prob;
1890     Vec             tvec;
1891     // get "mass" matrix for reordering
1892     PetscCall(DMClone(ctx->plex[grid], &massDM));
1893     PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM));
1894     PetscCall(DMCreateDS(massDM));
1895     PetscCall(DMGetDS(massDM, &prob));
1896     for (PetscInt ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
1897     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1898     PetscCall(DMCreateMatrix(massDM, &gMat));
1899     PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
1900     PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
1901     PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
1902     PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec));
1903     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
1904     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
1905     PetscCall(DMDestroy(&massDM));
1906     PetscCall(VecDestroy(&tvec));
1907     subM[grid] = gMat;
1908     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1909       MatOrderingType rtype = MATORDERINGRCM;
1910       IS              isrow, isicol;
1911       PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol));
1912       PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid]));
1913       PetscCall(ISGetIndices(isrow, &values));
1914       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1915 #if !defined(LANDAU_SPECIES_MAJOR)
1916         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N;
1917         for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1918 #else
1919         PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n;
1920         for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1921 #endif
1922       }
1923       PetscCall(ISRestoreIndices(isrow, &values));
1924       PetscCall(ISDestroy(&isrow));
1925       PetscCall(ISDestroy(&isicol));
1926     }
1927   }
1928   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is));
1929   // get a block matrix
1930   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1931     Mat      B = subM[grid];
1932     PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024;
1933     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1934     PetscCall(MatGetSize(B, &nloc, NULL));
1935     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1936       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1937       const PetscInt    *cols;
1938       const PetscScalar *vals;
1939       for (PetscInt i = 0; i < nloc; i++) {
1940         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
1941         if (nzl > COL_BF_SIZE) {
1942           PetscCall(PetscFree(colbuf));
1943           PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to row size %" PetscInt_FMT "\n", COL_BF_SIZE, nzl));
1944           COL_BF_SIZE = nzl;
1945           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
1946         }
1947         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
1948         for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
1949         row = i + moffset;
1950         PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
1951         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
1952       }
1953     }
1954     PetscCall(PetscFree(colbuf));
1955   }
1956   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
1957   PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY));
1958   PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY));
1959 
1960   // debug
1961   PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
1962   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1963     Mat mat_block_order;
1964     PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute
1965     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view"));
1966     PetscCall(MatDestroy(&mat_block_order));
1967     PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch));
1968     PetscCall(VecDuplicate(X, &ctx->work_vec));
1969   }
1970   PetscFunctionReturn(PETSC_SUCCESS);
1971 }
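/* Added note (layout sketch, not in the original source): in the default batch-major layout
   (the #if !defined(LANDAU_SPECIES_MAJOR) branch), row i of the per-grid matrix for grid 'grid'
   and batch 'b_id' lands at global row

       i + ctx->mat_offset[grid] + b_id * ctx->mat_offset[ctx->num_grids]

   which matches the explicit n0 computation in the reordering loop above; the LAND_MOFFSET
   macro used when copying the blocks is assumed to encapsulate the same offset. */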
1972 
1973 static void LandauSphereMapping(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar f[])
1974 {
1975   PetscReal u_max = 0, u_norm = 0, scale, square_inner_radius = PetscRealPart(constants[0]), square_radius = PetscRealPart(constants[1]);
1976   PetscInt  d;
1977 
1978   for (d = 0; d < dim; ++d) {
1979     PetscReal val = PetscAbsReal(PetscRealPart(u[d]));
1980     if (val > u_max) u_max = val;
1981     u_norm += PetscRealPart(u[d]) * PetscRealPart(u[d]);
1982   }
1983   u_norm = PetscSqrtReal(u_norm);
1984 
1985   if (u_max < square_inner_radius) {
1986     for (d = 0; d < dim; ++d) f[d] = u[d];
1987     return;
1988   }
1989 
1990   /*
1991     The outer cube has faces at max_d |u_d| = square_radius.
1992     u_1 is the intersection of the ray from the origin through u with the outer cube face.
1993     R_max = square_radius * sqrt(dim) is the radius of the sphere that points on the outer cube face are mapped to.
1994     u_0 is the intersection of the ray with the inner cube face.
1995     The inner cube has faces at max_d |u_d| = square_inner_radius.
1996     Scale each point linearly between u_0 and u_1 so that a point on the inner face does not move and a point on the outer face moves to the sphere.
1997   */
1998   if (u_max > square_radius + 1e-5) (void)PetscPrintf(PETSC_COMM_SELF, "Error: Point outside outer radius: u_max %g > %g\n", (double)u_max, (double)square_radius);
1999   /*  if (PetscAbsReal(u_max - square_inner_radius) < 1e-5 || PetscAbsReal(u_max - square_radius) < 1e-5) {
2000     (void)PetscPrintf(PETSC_COMM_SELF, "Warning: Point near corner of inner and outer cube: u_max %g, inner %g, outer %g\n", (double)u_max, (double)square_inner_radius, (double)square_radius);
2001   } */
2002   {
2003     PetscReal u_0_norm  = u_norm * square_inner_radius / u_max;
2004     PetscReal R_max     = square_radius * PetscSqrtReal((PetscReal)dim);
2005     PetscReal t         = (u_max - square_inner_radius) / (square_radius - square_inner_radius);
2006     PetscReal rho_prime = (1.0 - t) * u_0_norm + t * R_max;
2007     scale               = rho_prime / u_norm;
2008   }
2009   for (d = 0; d < dim; ++d) f[d] = u[d] * scale;
2010 }
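/* Added note (worked example of the mapping above, not in the original source): in 3D with
   square_inner_radius = 0.5 and square_radius = 1.0, the point u = (1, 0, 0) on the outer face
   has u_max = u_norm = 1, so t = 1, u_0_norm = 0.5, rho_prime = R_max = sqrt(3), and the point is
   scaled onto the sphere at (sqrt(3), 0, 0); a point with u_max = square_inner_radius gives
   t = 0, rho_prime = u_0_norm = u_norm, and is left unchanged. */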
2011 
2012 static PetscErrorCode LandauSphereMesh(DM dm, PetscReal inner, PetscReal radius)
2013 {
2014   DM          cdm;
2015   PetscDS     cds;
2016   PetscScalar consts[2];
2017 
2018   PetscFunctionBegin;
2019   consts[0] = inner;
2020   consts[1] = radius;
2021   PetscCall(DMGetCoordinateDM(dm, &cdm));
2022   PetscCall(DMGetDS(cdm, &cds));
2023   PetscCall(PetscDSSetConstants(cds, 2, consts));
2024   PetscCall(DMPlexRemapGeometry(dm, 0.0, LandauSphereMapping));
2025   PetscFunctionReturn(PETSC_SUCCESS);
2026 }
2027 
2028 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2029 
2030 /*@C
2031   DMPlexLandauCreateVelocitySpace - Create a `DMPLEX` velocity space mesh
2032 
2033   Collective
2034 
2035   Input Parameters:
2036 + comm   - The MPI communicator
2037 . dim    - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2038 - prefix - prefix for options (not tested)
2039 
2040   Output Parameters:
2041 + pack - The `DM` object representing the mesh
2042 . X    - A vector (user destroys)
2043 - J    - Optional matrix (object destroys)
2044 
2045   Level: beginner
2046 
2047 .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
2048  @*/
2049 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2050 {
2051   LandauCtx *ctx;
2052   Vec        Xsub[LANDAU_MAX_GRIDS];
2053   IS         grid_batch_is_inv[LANDAU_MAX_GRIDS];
2054 
2055   PetscFunctionBegin;
2056   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2057   PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM);
2058   PetscCall(PetscNew(&ctx));
2059   ctx->comm = comm; /* used for diagnostics and global errors */
2060   /* process options */
2061   PetscCall(ProcessOptions(ctx, prefix));
2062   if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE;
2063   /* Create Mesh */
2064   PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack));
2065   PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0));
2066   PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0));
2067   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR)
2068   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2069     /* create FEM */
2070     PetscCall(SetupDS(ctx->plex[grid], dim, grid, prefix, ctx));
2071     /* set initial state */
2072     PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid]));
2073     PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig"));
2074     /* initial static refinement, no solve */
2075     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
2076     /* forest refinement - forest goes in (if forest), plex comes out */
2077     if (ctx->use_p4est) {
2078       DM plex;
2079       PetscCall(adapt(grid, ctx, &Xsub[grid])); // forest goes in, plex comes out
2080       // convert to plex, all done with this level
2081       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
2082       PetscCall(DMDestroy(&ctx->plex[grid]));
2083       ctx->plex[grid] = plex;
2084     } else if (ctx->sphere && dim == 3) {
2085       if (ctx->map_sphere) PetscCall(LandauSphereMesh(ctx->plex[grid], ctx->radius[grid] * ctx->sphere_inner_radius_90degree[grid], ctx->radius[grid]));
2086       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
2087     }
2088     if (grid == 0) {
2089       PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view"));
2090       PetscCall(VecSetOptionsPrefix(Xsub[grid], prefix));
2091       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
2092     }
2093 #if !defined(LANDAU_SPECIES_MAJOR)
2094     PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
2095 #else
2096     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2097       PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
2098     }
2099 #endif
2100     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
2101   }
2102 #if !defined(LANDAU_SPECIES_MAJOR)
2103   // stack the batched DMs (b_id = 0 was added in the loop above; this could all be done here)
2104   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2105     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid]));
2106   }
2107 #endif
2108   // create ctx->mat_offset
2109   ctx->mat_offset[0] = 0;
2110   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2111     PetscInt n;
2112     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2113     ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n;
2114   }
2115   // create DM & Jacobian
2116   PetscCall(DMSetApplicationContext(*pack, ctx));
2117   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2118   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2119   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2120   PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2121   PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2122   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2123   // construct initial conditions in X
2124   PetscCall(DMCreateGlobalVector(*pack, X));
2125   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2126     PetscInt n;
2127     PetscCall(VecGetLocalSize(Xsub[grid], &n));
2128     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2129       PetscScalar const *values;
2130       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2131       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx));
2132       PetscCall(VecGetArrayRead(Xsub[grid], &values)); // Drop whole grid in Plex ordering
2133       for (PetscInt i = 0, idx = moffset; i < n; i++, idx++) PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES));
2134       PetscCall(VecRestoreArrayRead(Xsub[grid], &values));
2135     }
2136   }
2137   // cleanup
2138   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(VecDestroy(&Xsub[grid]));
2139   /* check for correct matrix type */
2140   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2141     PetscBool flg;
2142     if (ctx->deviceType == LANDAU_KOKKOS) {
2143       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, ""));
2144 #if defined(PETSC_HAVE_KOKKOS)
2145       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2146 #else
2147       PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2148 #endif
2149     }
2150   }
2151   PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0));
2152 
2153   // create field major ordering
2154   ctx->work_vec   = NULL;
2155   ctx->plex_batch = NULL;
2156   ctx->batch_is   = NULL;
2157   for (PetscInt i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL;
2158   PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0));
2159   PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx));
2160   PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0));
2161 
2162   // create AMR GPU assembly maps and static GPU data
2163   PetscCall(CreateStaticData(dim, grid_batch_is_inv, prefix, ctx));
2164 
2165   PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0));
2166 
2167   // create mass matrix
2168   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2169 
2170   if (J) *J = ctx->J;
2171 
2172   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2173     PetscContainer container;
2174     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2175     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2176     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2177     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container));
2178     PetscCall(PetscContainerDestroy(&container));
2179     // batch solvers need to map -- can batch solvers work
2180     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2181     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2182     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container));
2183     PetscCall(PetscContainerDestroy(&container));
2184   }
2185   // for batch solvers
2186   {
2187     PetscContainer container;
2188     PetscInt      *pNf;
2189     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2190     PetscCall(PetscMalloc1(sizeof(*pNf), &pNf));
2191     *pNf = ctx->batch_sz;
2192     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2193     PetscCall(PetscContainerSetCtxDestroy(container, PetscCtxDestroyDefault));
2194     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container));
2195     PetscCall(PetscContainerDestroy(&container));
2196   }
2197   PetscFunctionReturn(PETSC_SUCCESS);
2198 }
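/*
  Example usage sketch (illustrative only, not part of the original source; error handling and
  option processing abbreviated): a minimal driver creates the velocity space, hands the returned
  Jacobian to a TS via the IFunction/IJacobian callbacks defined later in this file, solves, and
  cleans up.

    DM  pack;
    Vec X;
    Mat J;
    TS  ts;

    PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack));
    PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
    PetscCall(TSSetDM(ts, pack));
    PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
    PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
    PetscCall(TSSetFromOptions(ts));
    PetscCall(TSSolve(ts, X));
    PetscCall(VecDestroy(&X)); // the user destroys X; J is destroyed with the velocity space
    PetscCall(TSDestroy(&ts));
    PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
*/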
2199 
2200 /*@C
2201   DMPlexLandauAccess - Access to the distribution function with user callback
2202 
2203   Collective
2204 
2205   Input Parameters:
2206 + pack     - the `DMCOMPOSITE`
2207 . func     - call back function
2208 - user_ctx - user context
2209 
2210   Input/Output Parameter:
2211 . X - Vector to add data to
2212 
2213   Level: advanced
2214 
2215 .seealso: `DMPlexLandauCreateVelocitySpace()`
2216  @*/
2217 PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx)
2218 {
2219   LandauCtx *ctx;
2220 
2221   PetscFunctionBegin;
2222   PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2223   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2224     PetscInt dim, n;
2225     PetscCall(DMGetDimension(pack, &dim));
2226     for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
2227       Vec      vec;
2228       PetscInt vf[1] = {i0};
2229       IS       vis;
2230       DM       vdm;
2231       PetscCall(DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm));
2232       PetscCall(DMSetApplicationContext(vdm, ctx)); // the user might want this
2233       PetscCall(DMCreateGlobalVector(vdm, &vec));
2234       PetscCall(VecGetSize(vec, &n));
2235       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2236         const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2237         PetscCall(VecZeroEntries(vec));
2238         /* Add your data with 'vdm' for species 'sp' to 'vec' */
2239         PetscCall(func(vdm, vec, i0, grid, b_id, user_ctx));
2240         /* add to global */
2241         PetscScalar const *values;
2242         const PetscInt    *offsets;
2243         PetscCall(VecGetArrayRead(vec, &values));
2244         PetscCall(ISGetIndices(vis, &offsets));
2245         for (PetscInt i = 0; i < n; i++) PetscCall(VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES));
2246         PetscCall(VecRestoreArrayRead(vec, &values));
2247         PetscCall(ISRestoreIndices(vis, &offsets));
2248       } // batch
2249       PetscCall(VecDestroy(&vec));
2250       PetscCall(ISDestroy(&vis));
2251       PetscCall(DMDestroy(&vdm));
2252     }
2253   } // grid
2254   PetscFunctionReturn(PETSC_SUCCESS);
2255 }
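/*
  Example callback sketch for DMPlexLandauAccess() (illustrative only; MyAccess and my_f0 are
  hypothetical names, not part of the original source): the callback receives the single-species
  sub-DM 'vdm' and an empty vector 'vec' for species 'i0' on grid 'grid' and batch 'b_id', fills
  'vec', and the loop above adds the result into the packed vector X.

    static PetscErrorCode MyAccess(DM vdm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user)
    {
      PetscErrorCode (*f[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *) = {my_f0};
      void *ctxs[1] = {user};

      PetscFunctionBegin;
      PetscCall(DMProjectFunction(vdm, 0.0, f, ctxs, INSERT_ALL_VALUES, vec));
      PetscFunctionReturn(PETSC_SUCCESS);
    }

    PetscCall(DMPlexLandauAccess(pack, X, MyAccess, NULL));
*/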
2256 
2257 /*@
2258   DMPlexLandauDestroyVelocitySpace - Destroy a `DMPLEX` velocity space mesh
2259 
2260   Collective
2261 
2262   Input/Output Parameter:
2263 . dm - the `DM` to destroy
2264 
2265   Level: beginner
2266 
2267 .seealso: `DMPlexLandauCreateVelocitySpace()`
2268  @*/
2269 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2270 {
2271   LandauCtx *ctx;
2272 
2273   PetscFunctionBegin;
2274   PetscCall(DMGetApplicationContext(*dm, &ctx));
2275   PetscCall(MatDestroy(&ctx->M));
2276   PetscCall(MatDestroy(&ctx->J));
2277   for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2278   PetscCall(ISDestroy(&ctx->batch_is));
2279   PetscCall(VecDestroy(&ctx->work_vec));
2280   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2281   if (ctx->deviceType == LANDAU_KOKKOS) {
2282 #if defined(PETSC_HAVE_KOKKOS)
2283     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2284 #else
2285     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
2286 #endif
2287   } else {
2288     if (ctx->SData_d.x) { /* in a CPU run */
2289       PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
2290       LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets;
2291       PetscCall(PetscFree4(ww, xx, yy, invJ));
2292       if (zz) PetscCall(PetscFree(zz));
2293       if (coo_elem_offsets) PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL
2294       PetscCall(PetscFree4(ctx->SData_d.alpha, ctx->SData_d.beta, ctx->SData_d.invMass, ctx->SData_d.lambdas));
2295     }
2296   }
2297 
2298   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2299     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE]));
2300     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz));
2301     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL]));
2302     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN]));
2303     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n", ctx->times[LANDAU_OPERATOR]));
2304     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n", ctx->times[LANDAU_MASS]));
2305     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n", ctx->times[LANDAU_F_DF]));
2306     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n", ctx->times[LANDAU_KERNEL]));
2307     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n", ctx->times[KSP_FACTOR]));
2308     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n", ctx->times[KSP_SOLVE]));
2309   }
2310   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMDestroy(&ctx->plex[grid]));
2311   PetscCall(PetscFree(ctx));
2312   PetscCall(DMDestroy(dm));
2313   PetscFunctionReturn(PETSC_SUCCESS);
2314 }
2315 
2316 /* < v, ru > */
2317 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2318 {
2319   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2320   f0[0]       = u[ii];
2321 }
2322 
2323 /* < v, ru > */
2324 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2325 {
2326   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2327   f0[0] = x[jj] * u[ii]; /* x momentum */
2328 }
2329 
2330 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2331 {
2332   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2333   double   tmp1 = 0.;
2334   for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2335   f0[0] = tmp1 * u[ii];
2336 }
2337 
2338 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2339 {
2340   const PetscReal *c2_0_arr = ((PetscReal *)actx);
2341   const PetscReal  c02      = c2_0_arr[0];
2342 
2343   PetscFunctionBegin;
2344   for (PetscInt s = 0; s < Nf; s++) {
2345     PetscReal tmp1 = 0.;
2346     for (PetscInt i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2347 #if defined(PETSC_USE_DEBUG)
2348     u[s] = PetscSqrtReal(1. + tmp1 / c02); //  u[0] = PetscSqrtReal(1. + xx);
2349 #else
2350     {
2351       PetscReal xx = tmp1 / c02;
2352       u[s]         = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.)
2353     }
2354 #endif
2355   }
2356   PetscFunctionReturn(PETSC_SUCCESS);
2357 }
2358 
2359 /* < v, ru > */
2360 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2361 {
2362   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2363   f0[0]       = 2. * PETSC_PI * x[0] * u[ii];
2364 }
2365 
2366 /* < v, ru > */
2367 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2368 {
2369   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2370   f0[0]       = 2. * PETSC_PI * x[0] * x[1] * u[ii];
2371 }
2372 
2373 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2374 {
2375   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2376   f0[0]       = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii];
2377 }
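/* Added note (not in the original source): the f0_s_* functionals above are the moment densities
   used by DMPlexLandauPrintNorms() below via PetscDSSetObjective(); constants[0] selects the
   species (field) index and, for f0_s_mom, constants[1] selects the velocity component. The
   r-prefixed variants fold in the cylindrical measure 2*pi*r for the 2D axisymmetric case, and
   gamma_n_f supplies the relativistic gamma factor used for the energy moment. */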
2378 
2379 /*@
2380   DMPlexLandauPrintNorms - collects moments and prints them
2381 
2382   Collective
2383 
2384   Input Parameters:
2385 + X     - the state
2386 - stepi - current step to print
2387 
2388   Level: beginner
2389 
2390 .seealso: `DMPlexLandauCreateVelocitySpace()`
2391  @*/
2392 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2393 {
2394   LandauCtx  *ctx;
2395   PetscDS     prob;
2396   DM          pack;
2397   PetscInt    cStart, cEnd, dim, ii, i0, nDMs;
2398   PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES];
2399   PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2400   Vec        *globXArray;
2401 
2402   PetscFunctionBegin;
2403   PetscCall(VecGetDM(X, &pack));
2404   PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2405   PetscCall(DMGetDimension(pack, &dim));
2406   PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim);
2407   PetscCall(DMGetApplicationContext(pack, &ctx));
2408   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2409   /* print momentum and energy */
2410   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
2411   PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz);
2412   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
2413   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2414   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2415     Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2416     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2417     for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
2418       PetscScalar user[2] = {(PetscScalar)i0, ctx->charges[ii]};
2419       PetscCall(PetscDSSetConstants(prob, 2, user));
2420       if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */
2421         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2422         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2423         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2424         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2425         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2426         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2427         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2428         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2429         energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2430         zmomentumtot += zmomentum[ii];
2431         energytot += energy[ii];
2432         densitytot += density[ii];
2433         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2434       } else { /* 2/3Xloc + 3V */
2435         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2436         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2437         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2438         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2439         user[1] = 0;
2440         PetscCall(PetscDSSetConstants(prob, 2, user));
2441         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2442         xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2443         user[1]       = 1;
2444         PetscCall(PetscDSSetConstants(prob, 2, user));
2445         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2446         ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2447         user[1]       = 2;
2448         PetscCall(PetscDSSetConstants(prob, 2, user));
2449         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2450         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2451         if (ctx->use_relativistic_corrections) {
2452           /* gamma * M * f */
2453           if (ii == 0 && grid == 0) { // do all at once
2454             Vec Mf, globGamma, *globMfArray, *globGammaArray;
2455             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f};
2456             PetscReal *c2_0[1], data[1];
2457 
2458             PetscCall(VecDuplicate(X, &globGamma));
2459             PetscCall(VecDuplicate(X, &Mf));
2460             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray));
2461             PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray));
2462             /* M * f */
2463             PetscCall(MatMult(ctx->M, X, Mf));
2464             /* gamma */
2465             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2466             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside a grid loop to print nicely; needs fixing for batching
2467               Vec v1  = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2468               data[0] = PetscSqr(C_0(ctx->v_0));
2469               c2_0[0] = &data[0];
2470               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1));
2471             }
2472             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2473             /* gamma * Mf */
2474             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2475             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2476             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes, a grid loop inside a grid loop to print nicely
2477               PetscInt Nf    = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs;
2478               Vec      Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2;
2479               // get each component
2480               PetscCall(VecGetSize(Mfsub, &N));
2481               PetscCall(VecCreate(ctx->comm, &v1));
2482               PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf));
2483               PetscCall(VecCreate(ctx->comm, &v2));
2484               PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf));
2485               PetscCall(VecSetFromOptions(v1)); // ???
2486               PetscCall(VecSetFromOptions(v2));
2487               // get each component
2488               PetscCall(VecGetBlockSize(Gsub, &bs));
2489               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf);
2490               PetscCall(VecGetBlockSize(Mfsub, &bs));
2491               PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf);
2492               for (PetscInt i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) {
2493                 PetscScalar val;
2494                 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO
2495                 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES));
2496                 PetscCall(VecDot(v1, v2, &val));
2497                 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix];
2498               }
2499               PetscCall(VecDestroy(&v1));
2500               PetscCall(VecDestroy(&v2));
2501             } /* grids */
2502             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2503             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2504             PetscCall(PetscFree(globGammaArray));
2505             PetscCall(PetscFree(globMfArray));
2506             PetscCall(VecDestroy(&globGamma));
2507             PetscCall(VecDestroy(&Mf));
2508           }
2509         } else {
2510           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2511           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx));
2512           energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2513         }
2514         PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii])));
2515         xmomentumtot += xmomentum[ii];
2516         ymomentumtot += ymomentum[ii];
2517         zmomentumtot += zmomentum[ii];
2518         energytot += energy[ii];
2519         densitytot += density[ii];
2520       }
2521       if (ctx->num_species > 1) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2522     }
2523   }
2524   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2525   PetscCall(PetscFree(globXArray));
2526   /* totals */
2527   PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd));
2528   if (ctx->num_species > 1) {
2529     if (dim == 2) {
2530       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2531                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2532     } else {
2533       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2534                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2535     }
2536   } else PetscCall(PetscPrintf(PETSC_COMM_WORLD, " -- %" PetscInt_FMT " cells", cEnd - cStart));
2537   PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n"));
2538   PetscFunctionReturn(PETSC_SUCCESS);
2539 }
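/*
  Example usage sketch (illustrative only; PostStep is a hypothetical name, not part of the
  original source): DMPlexLandauPrintNorms() is typically called from a TS post-step hook or
  monitor to track conservation of density, momentum, and energy over the run.

    static PetscErrorCode PostStep(TS ts)
    {
      PetscInt stepi;
      Vec      X;

      PetscFunctionBegin;
      PetscCall(TSGetSolution(ts, &X));
      PetscCall(TSGetStepNumber(ts, &stepi));
      PetscCall(DMPlexLandauPrintNorms(X, stepi));
      PetscFunctionReturn(PETSC_SUCCESS);
    }

    PetscCall(TSSetPostStep(ts, PostStep));
*/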
2540 
2541 /*@
2542   DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2543   - puts mass matrix into ctx->M
2544 
2545   Collective
2546 
2547   Input Parameter:
2548 . pack - the `DM` object. Puts matrix in Landau context M field
2549 
2550   Output Parameter:
2551 . Amat - The mass matrix (optional); the matrix is also stored in the `DM` context
2552 
2553   Level: beginner
2554 
2555 .seealso: `DMPlexLandauCreateVelocitySpace()`
2556  @*/
2557 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2558 {
2559   DM         mass_pack, massDM[LANDAU_MAX_GRIDS];
2560   PetscDS    prob;
2561   PetscInt   ii, dim, N1 = 1, N2;
2562   LandauCtx *ctx;
2563   Mat        packM, subM[LANDAU_MAX_GRIDS];
2564 
2565   PetscFunctionBegin;
2566   PetscValidHeaderSpecific(pack, DM_CLASSID, 1);
2567   if (Amat) PetscAssertPointer(Amat, 2);
2568   PetscCall(DMGetApplicationContext(pack, &ctx));
2569   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2570   PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0));
2571   PetscCall(DMGetDimension(pack, &dim));
2572   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack));
2573   /* create pack mass matrix */
2574   for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) {
2575     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2576     PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM[grid]));
2577     PetscCall(DMCreateDS(massDM[grid]));
2578     PetscCall(DMGetDS(massDM[grid], &prob));
2579     for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) {
2580       if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2581       else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2582     }
2583 #if !defined(LANDAU_SPECIES_MAJOR)
2584     PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2585 #else
2586     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2587       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2588     }
2589 #endif
2590     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2591   }
2592 #if !defined(LANDAU_SPECIES_MAJOR)
2593   // stack the batched DMs
2594   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2595     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2596   }
2597 #endif
2598   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only"));
2599   PetscCall(DMCreateMatrix(mass_pack, &packM));
2600   PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false"));
2601   PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2602   PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2603   PetscCall(DMDestroy(&mass_pack));
2604   /* make mass matrix for each block */
2605   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2606     Vec locX;
2607     DM  plex = massDM[grid];
2608     PetscCall(DMGetLocalVector(plex, &locX));
2609     /* Mass matrix is independent of the input, so no need to fill locX */
2610     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2611     PetscCall(DMRestoreLocalVector(plex, &locX));
2612     PetscCall(DMDestroy(&massDM[grid]));
2613   }
2614   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2615   PetscCall(MatGetSize(packM, &N2, NULL));
2616   PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2);
2617   /* assemble block diagonals */
2618   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2619     Mat      B = subM[grid];
2620     PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row;
2621     PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2622     PetscCall(MatGetSize(B, &nloc, NULL));
2623     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2624       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2625       const PetscInt    *cols;
2626       const PetscScalar *vals;
2627       for (PetscInt i = 0; i < nloc; i++) {
2628         PetscCall(MatGetRow(B, i, &nzl, NULL, NULL));
2629         if (nzl > COL_BF_SIZE) {
2630           PetscCall(PetscFree(colbuf));
2631           PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to row size %" PetscInt_FMT "\n", COL_BF_SIZE, nzl));
2632           COL_BF_SIZE = nzl;
2633           PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf));
2634         }
2635         PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
2636         for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
2637         row = i + moffset;
2638         PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES));
2639         PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
2640       }
2641     }
2642     PetscCall(PetscFree(colbuf));
2643   }
2644   // cleanup
2645   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid]));
2646   PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY));
2647   PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY));
2648   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2649   PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view"));
2650   ctx->M = packM;
2651   if (Amat) *Amat = packM;
2652   PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0));
2653   PetscFunctionReturn(PETSC_SUCCESS);
2654 }
2655 
2656 /*@
2657   DMPlexLandauIFunction - `TS` residual calculation, confusingly this computes the Jacobian w/o mass
2658 
2659   Collective
2660 
2661   Input Parameters:
2662 + ts         - The time stepping context
2663 . time_dummy - current time (not used)
2664 . X          - Current state
2665 . X_t        - Time derivative of current state
2666 - actx       - Landau context
2667 
2668   Output Parameter:
2669 . F - The residual
2670 
2671   Level: beginner
2672 
2673 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2674  @*/
2675 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2676 {
2677   LandauCtx *ctx = (LandauCtx *)actx;
2678   PetscInt   dim;
2679   DM         pack;
2680 #if defined(PETSC_HAVE_THREADSAFETY)
2681   double starttime, endtime;
2682 #endif
2683   PetscObjectState state;
2684 
2685   PetscFunctionBegin;
2686   PetscCall(TSGetDM(ts, &pack));
2687   PetscCall(DMGetApplicationContext(pack, &ctx));
2688   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2689   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2690   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2691   PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0));
2692 #if defined(PETSC_HAVE_THREADSAFETY)
2693   starttime = MPI_Wtime();
2694 #endif
2695   PetscCall(DMGetDimension(pack, &dim));
2696   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2697   if (state != ctx->norm_state) {
2698     PetscCall(MatZeroEntries(ctx->J));
2699     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx));
2700     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2701     PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2702     ctx->norm_state = state;
2703   } else {
2704     PetscCall(PetscInfo(ts, "WARNING Skipping Jacobian assembly, state %" PetscInt64_FMT " has not changed\n", state));
2705   }
2706   /* mat vec for op */
2707   PetscCall(MatMult(ctx->J, X, F)); /* C*f */
2708   /* add time term */
2709   if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F));
2710 #if defined(PETSC_HAVE_THREADSAFETY)
2711   if (ctx->stage) {
2712     endtime = MPI_Wtime();
2713     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2714     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2715     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2716     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2717   }
2718 #endif
2719   PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0));
2720   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2721   if (ctx->stage) PetscCall(PetscLogStagePop());
2722   PetscFunctionReturn(PETSC_SUCCESS);
2723 }
2724 
2725 /*@
2726   DMPlexLandauIJacobian - `TS` Jacobian construction, confusingly this adds mass
2727 
2728   Collective
2729 
2730   Input Parameters:
2731 + ts         - The time stepping context
2732 . time_dummy - current time (not used)
2733 . X          - Current state
2734 . U_tdummy   - Time derivative of current state (not used)
2735 . shift      - shift for du/dt term
2736 - actx       - Landau context
2737 
2738   Output Parameters:
2739 + Amat - Jacobian
2740 - Pmat - same as Amat
2741 
2742   Level: beginner
2743 
2744 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2745  @*/
2746 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2747 {
2748   LandauCtx *ctx = NULL;
2749   PetscInt   dim;
2750   DM         pack;
2751 #if defined(PETSC_HAVE_THREADSAFETY)
2752   double starttime, endtime;
2753 #endif
2754   PetscObjectState state;
2755 
2756   PetscFunctionBegin;
2757   PetscCall(TSGetDM(ts, &pack));
2758   PetscCall(DMGetApplicationContext(pack, &ctx));
2759   PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2760   PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2761   PetscCall(DMGetDimension(pack, &dim));
2762   /* get collision Jacobian into A */
2763   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2764   PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0));
2765   PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0));
2766 #if defined(PETSC_HAVE_THREADSAFETY)
2767   starttime = MPI_Wtime();
2768 #endif
2769   PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift");
2770   PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state));
2771   PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT, ctx->norm_state, state);
2772   if (!ctx->use_matrix_mass) {
2773     PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx));
2774   } else { /* add mass */
2775     PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN));
2776   }
2777 #if defined(PETSC_HAVE_THREADSAFETY)
2778   if (ctx->stage) {
2779     endtime = MPI_Wtime();
2780     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2781     ctx->times[LANDAU_MASS] += (endtime - starttime);
2782     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2783   }
2784 #endif
2785   PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0));
2786   PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0));
2787   if (ctx->stage) PetscCall(PetscLogStagePop());
2788   PetscFunctionReturn(PETSC_SUCCESS);
2789 }
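/* Added note (summary of the IFunction/IJacobian split above, not in the original source): with
   M the mass matrix (ctx->M) and C(X) the Landau collision operator assembled into ctx->J by
   DMPlexLandauIFunction(),

       IFunction:  F(t, X, X_t) = C(X) X + M X_t
       IJacobian:  J = shift * M + C(X)

   The PetscObjectState check above requires that ctx->J still holds the C(X) assembled during the
   last residual evaluation, so IJacobian only needs to add the shifted mass term, either through
   LandauFormJacobian_Internal() with a nonzero shift or through MatAXPY() with the precomputed
   ctx->M when use_matrix_mass is set. */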
2790