1 #include <../src/mat/impls/aij/seq/aij.h>
2 #include <petsc/private/dmpleximpl.h>   /*I "petscdmplex.h" I*/
3 #include <petsclandau.h>                /*I "petsclandau.h"   I*/
4 #include <petscts.h>
5 #include <petscdmforest.h>
6 #include <petscdmcomposite.h>
7 
8 /* Landau collision operator */
9 
10 /* relativistic terms */
11 #if defined(PETSC_USE_REAL_SINGLE)
12 #define SPEED_OF_LIGHT 2.99792458e8F
13 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
14 #else
15 #define SPEED_OF_LIGHT 2.99792458e8
16 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
17 #endif
18 
19 #define PETSC_THREAD_SYNC
20 #include "land_tensors.h"
21 
22 #if defined(PETSC_HAVE_OPENMP)
23 #include <omp.h>
24 #endif
25 
26 static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
27 {
28   P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
29   PetscFunctionBegin;
30   // free device data
31   if (maps[0].deviceType != LANDAU_CPU) {
32 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
33     if (maps[0].deviceType == LANDAU_KOKKOS) {
34       PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // implies Kokkos does the destroy
35     } // else could be CUDA
36 #elif defined(PETSC_HAVE_CUDA)
37     if (maps[0].deviceType == LANDAU_CUDA) {
38       PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids));
39     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %d ?????",maps->deviceType);
40 #endif
41   }
42   // free host data
43   for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
44     PetscCall(PetscFree(maps[grid].c_maps));
45     PetscCall(PetscFree(maps[grid].gIdx));
46   }
47   PetscCall(PetscFree(maps));
48 
49   PetscFunctionReturn(0);
50 }
51 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
52 {
53   PetscReal     v2 = 0;
54   PetscFunctionBegin;
55   /* compute v^2 */
56   for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
57   /* energy: v^2/2 */
58   u[0] = v2/2;
59   PetscFunctionReturn(0);
60 }
61 
62 /* needs double */
63 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
64 {
65   PetscReal     *c2_0_arr = ((PetscReal*)actx);
66   double        u2 = 0, c02 = (double)*c2_0_arr, xx;
67 
68   PetscFunctionBegin;
69   /* compute |u|^2 */
70   for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
71   /* gamma - 1 = g_eps, for conditioning and we only take derivatives */
72   xx = u2/c02;
73 #if defined(PETSC_USE_DEBUG)
74   u[0] = PetscSqrtReal(1. + xx);
75 #else
76   u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative
77 #endif
78   PetscFunctionReturn(0);
79 }
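
/* Note on conditioning: sqrt(1 + xx) - 1 == xx/(sqrt(1 + xx) + 1) exactly, so the non-debug
   branch evaluates gamma - 1 without the cancellation that occurs when xx = |u|^2/c_0^2 is
   small; additive constants are immaterial here since only derivatives of this field are used. */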
80 
81 /*
82  LandauFormJacobian_Internal - Evaluates Jacobian matrix.
83 
84  Input Parameters:
85  .  a_X - input vector
86  .  a_ctx - optional user-defined context
87  .  dim - dimension
88 
89  Output Parameter:
90  .  JacP - Jacobian matrix (filled, not created)
91  */
92 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
93 {
94   LandauCtx         *ctx = (LandauCtx*)a_ctx;
95   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nb;
96   PetscQuadrature   quad;
97   PetscReal         Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
98   PetscScalar       *cellClosure=NULL;
99   const PetscScalar *xdata=NULL;
100   PetscDS           prob;
101   PetscContainer    container;
102   P4estVertexMaps   *maps;
103   Mat               subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];
104 
105   PetscFunctionBegin;
106   PetscValidHeaderSpecific(a_X,VEC_CLASSID,1);
107   PetscValidHeaderSpecific(JacP,MAT_CLASSID,2);
108   PetscValidPointer(ctx,5);
109   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
110   PetscCheck(ctx->plex[0] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
111   PetscCall(PetscLogEventBegin(ctx->events[10],0,0,0,0));
112   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
113   PetscCall(PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container));
114   if (container) {
115     PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly");
116     PetscCall(PetscContainerGetPointer(container, (void **) &maps));
117     PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
118     for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
119   } else {
120     PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly");
121     for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
122       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
123         PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]));
124       }
125     }
126     maps = NULL;
127   }
128   // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
129   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
130   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); Nb = Nq;
131   PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
132   // get metadata for collecting dynamic data
133   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
134     PetscInt cStart, cEnd;
135     PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
136     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
137     numCells[grid] = cEnd - cStart; // grids can have different topology
138   }
139   PetscCall(PetscLogEventEnd(ctx->events[10],0,0,0,0));
140   if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
141     DM pack;
142     PetscCall(VecGetDM(a_X, &pack));
143     PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
144     PetscCall(PetscLogEventBegin(ctx->events[1],0,0,0,0));
145     for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
146       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
147       if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
148     }
149     if (!ctx->gpu_assembly) {
150       Vec          *locXArray,*globXArray;
151       PetscScalar  *cellClosure_it;
152       PetscInt     cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
153       PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
154       for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
155         PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
156         PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
157         PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
158       }
159       /* count cellClosure size */
160       PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
161       for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
162       PetscCall(PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure));
163       cellClosure_it = cellClosure;
164       PetscCall(PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray));
165       PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
166       PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
167       PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
168       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
169         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
170           Vec         locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
171           PetscInt    cStart, cEnd, ei;
172           PetscCall(VecDuplicate(locX,&locX2));
173           PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
174           PetscCall(DMGlobalToLocalEnd  (ctx->plex[grid], globX, INSERT_VALUES, locX2));
175           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
176           for (ei = cStart ; ei < cEnd; ++ei) {
177             PetscScalar *coef = NULL;
178             PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
179             PetscCall(PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
180             PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
181             cellClosure_it += Nb*Nf[grid];
182           }
183           PetscCall(VecDestroy(&locX2));
184         }
185       }
186       PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT,(PetscCount)(cellClosure_it-cellClosure),cellClosure_sz*ctx->batch_sz);
187       PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
188       PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
189       PetscCall(PetscFree(locXArray));
190       PetscCall(PetscFree(globXArray));
191       xdata = NULL;
192     } else {
193       PetscMemType mtype;
194       if (ctx->jacobian_field_major_order) { // get data in batch ordering
195         PetscCall(VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
196         PetscCall(VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD));
197         PetscCall(VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype));
198       } else {
199         PetscCall(VecGetArrayReadAndMemType(a_X,&xdata,&mtype));
200       }
201       if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
202         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
203       }
204       cellClosure = NULL;
205     }
206     PetscCall(PetscLogEventEnd(ctx->events[1],0,0,0,0));
207   } else xdata = cellClosure = NULL;
208 
209   /* do it */
210   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
211     if (ctx->deviceType == LANDAU_CUDA) {
212 #if defined(PETSC_HAVE_CUDA)
213       PetscCall(LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP));
214 #else
215       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
216 #endif
217     } else if (ctx->deviceType == LANDAU_KOKKOS) {
218 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
219       PetscCall(LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP));
220 #else
221       SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
222 #endif
223     }
224   } else {   /* CPU version */
225     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
226     PetscInt        ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
227     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
228     PetscReal       Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
229     PetscSection    section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
230     PetscScalar     *coo_vals=NULL;
231     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
232       PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
233       PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
234       PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
235     }
236     /* count IPf size, etc */
237     PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
238     const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
239     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
240     for (PetscInt grid=0 ; grid<num_grids ; grid++) {
241       PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
242       elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
243       ip_offset[grid+1]   = ip_offset[grid]   + numCells[grid]*Nq;
244       ipf_offset[grid+1]  = ipf_offset[grid]  + Nq*nfloc*numCells[grid];
245     }
246     IPf_sz_glb = ipf_offset[num_grids];
247     IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
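    // Layout note (a reading aid, not new logic): ff/dudx/dudy(/dudz) below hold f and its
    // velocity-space gradient at every quadrature point for every local species, grid and batch
    // member, addressed as b_id*IPf_sz_glb + ipf_offset[grid] + f*nip_loc + loc_elem*Nq + qi,
    // so IPf_sz_glb is the footprint of one batch member and IPf_sz_tot covers the whole batch.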
248     // prep COO
249     if (ctx->coo_assembly) {
250       PetscCall(PetscMalloc1(ctx->SData_d.coo_size,&coo_vals)); // allocate every time?
251       PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",(PetscInt)ctx->SData_d.coo_size));
252     }
253     if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
254 #if defined(PETSC_HAVE_THREADSAFETY)
255       double         starttime, endtime;
256       starttime = MPI_Wtime();
257 #endif
258       PetscCall(PetscLogEventBegin(ctx->events[8],0,0,0,0));
259       for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
260         invMass[fieldA]  = ctx->m_0/ctx->masses[fieldA];
261         Eq_m[fieldA]     = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
262         if (dim==2) Eq_m[fieldA] *=  2 * PETSC_PI; /* add the 2pi term that is not in Landau */
263         nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
264         nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
265       }
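      // Note: nu_alpha/nu_beta are the species-dependent prefactors of the nondimensionalized
      // collision operator (charge, mass and lnLambda folded in with the t_0, n_0, v_0 scaling)
      // and Eq_m is the normalized E-field drive (with the extra 2*pi in the 2D cylindrical form).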
266       PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz));
267       // F df/dx
268       for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
269         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
270         // find my grid:
271         PetscInt       grid = 0;
272         while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid
273         {
274           const PetscInt     loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
275           const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
276           PetscScalar        *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
277           PetscReal          *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data on batch 0
278           PetscInt           b,f,q;
279           if (cellClosure) {
280             coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
281           } else {
282             coef = coef_buff;
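            // Gather element coefficients from the raw (GPU-layout) vector data: gIdx[elem][f][b]
            // holds the global index of each element dof; a negative entry (-idx-1) marks a
            // constrained (hanging) dof whose value is the scaled sum of its c_maps face entries.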
283             for (f = 0; f < loc_Nf; ++f) {
284               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
285               for (b = 0; b < Nb; ++b) {
286                 PetscInt idx = Idxs[b];
287                 if (idx >= 0) {
288                   coef[f*Nb+b] = xdata[idx+moffset];
289                 } else {
290                   idx = -idx - 1;
291                   coef[f*Nb+b] = 0;
292                   for (q = 0; q < maps[grid].num_face; q++) {
293                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
294                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
295                     coef[f*Nb+b] += scale*xdata[id+moffset];
296                   }
297                 }
298               }
299             }
300           }
301           /* get f and df */
302           for (PetscInt qi = 0; qi < Nq; qi++) {
303             const PetscReal  *invJ = &invJe[qi*dim*dim];
304             const PetscReal  *Bq   = &BB[qi*Nb];
305             const PetscReal  *Dq   = &DD[qi*Nb*dim];
306             PetscReal        u_x[LANDAU_DIM];
307             /* get f & df */
308             for (f = 0; f < loc_Nf; ++f) {
309               const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
310               PetscInt       b, e;
311               PetscReal      refSpaceDer[LANDAU_DIM];
312               ff[idx] = 0.0;
313               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
314               for (b = 0; b < Nb; ++b) {
315                 const PetscInt    cidx = b;
316                 ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
317                 for (int d = 0; d < dim; ++d) {
318                   refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
319                 }
320               }
321               for (int d = 0; d < LANDAU_DIM; ++d) {
322                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
323                   u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
324                 }
325               }
326               dudx[idx] = u_x[0];
327               dudy[idx] = u_x[1];
328 #if LANDAU_DIM==3
329               dudz[idx] = u_x[2];
330 #endif
331             }
332           } // q
333         } // grid
334       } // grid*batch
335       PetscCall(PetscLogEventEnd(ctx->events[8],0,0,0,0));
336 #if defined(PETSC_HAVE_THREADSAFETY)
337       endtime = MPI_Wtime();
338       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
339 #endif
340     } // Jacobian setup
341     // assemble Jacobian (or mass)
342     for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
343       const PetscInt b_Nelem      = elem_offset[num_grids];
344       const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
345       PetscInt       grid         = 0;
346 #if defined(PETSC_HAVE_THREADSAFETY)
347       double         starttime, endtime;
348       starttime                   = MPI_Wtime();
349 #endif
350       while (glb_elem_idx >= elem_offset[grid+1]) grid++;
351       {
352         const PetscInt     loc_Nf  = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
353         const PetscInt     moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
354         PetscScalar        *elemMat;
355         const PetscReal    *invJe  = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
356         PetscCall(PetscMalloc1(elemMatSize, &elemMat));
357         PetscCall(PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat)));
358         if (shift==0.0) { // Jacobian
359           PetscCall(PetscLogEventBegin(ctx->events[4],0,0,0,0));
360         } else {          // mass
361           PetscCall(PetscLogEventBegin(ctx->events[16],0,0,0,0));
362         }
363         for (PetscInt qj = 0; qj < Nq; ++qj) {
364           const PetscInt   jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
365           PetscReal        g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
366           PetscInt         d,d2,dp,d3,IPf_idx;
367           if (shift==0.0) { // Jacobian
368             const PetscReal * const invJj = &invJe[qj*dim*dim];
369             PetscReal               gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
370             const PetscReal         vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
371             // create g2 & g3
372             for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
373               gg2_temp[d] = 0;
374               for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
375             }
376             /* inner beta reduction */
377             IPf_idx = 0;
378             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
379               PetscInt  nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
380               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
381                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
382                   const PetscReal wi       = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
383                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
384 #if LANDAU_DIM==2
385                   PetscReal       Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
386                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
387 #else
388                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
389                   if (ctx->use_relativistic_corrections) {
390                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
391                   } else {
392                     LandauTensor3D(vj, x, y, z, U, mask);
393                   }
394 #endif
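                  // 'mask' (computed above) is zero when the field point vj coincides, to a
                  // tolerance, with this integration point, so the singular self-interaction term
                  // of the Landau kernel drops out of the tensor U (or Ud/Uk in 2D).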
395                   for (int f = 0; f < Nfloc_r ; ++f) {
396                     const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi;  // IPf_idx + f*nip_loc_r + loc_fdf_idx;
397                     temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
398                     temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
399 #if LANDAU_DIM==3
400                     temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
401 #endif
402                     temp2    += ff[idx]*nu_beta[f+f_off];
403                   }
404                   temp1[0] *= wi;
405                   temp1[1] *= wi;
406 #if LANDAU_DIM==3
407                   temp1[2] *= wi;
408 #endif
409                   temp2    *= wi;
410 #if LANDAU_DIM==2
411                   for (d2 = 0; d2 < 2; d2++) {
412                     for (d3 = 0; d3 < 2; ++d3) {
413                       /* K = U * grad(f): g2=e: i,A */
414                       gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
415                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
416                       gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
417                     }
418                   }
419 #else
420                   for (d2 = 0; d2 < 3; ++d2) {
421                     for (d3 = 0; d3 < 3; ++d3) {
422                       /* K = U * grad(f): g2 = e: i,A */
423                       gg2_temp[d2] += U[d2][d3]*temp1[d3];
424                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
425                       gg3_temp[d2][d3] += U[d2][d3]*temp2;
426                     }
427                   }
428 #endif
429                 } // qi
430               } // ei_r
431               IPf_idx += nip_loc_r*Nfloc_r;
432             } /* grid_r - IPs */
433             PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb);
434             // add alpha and put in gg2/3
435             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
436               for (d2 = 0; d2 < LANDAU_DIM; d2++) {
437                 gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
438                 for (d3 = 0; d3 < LANDAU_DIM; d3++) {
439                   gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
440                 }
441               }
442             }
443             /* add electric field term once per IP */
444             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
445               gg2[fieldA][LANDAU_DIM-1] += Eq_m[fieldA+f_off];
446             }
447             /* Jacobian transform - g2, g3 */
448             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
449               for (d = 0; d < dim; ++d) {
450                 g2[fieldA][d] = 0.0;
451                 for (d2 = 0; d2 < dim; ++d2) {
452                   g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
453                   g3[fieldA][d][d2] = 0.0;
454                   for (d3 = 0; d3 < dim; ++d3) {
455                     for (dp = 0; dp < dim; ++dp) {
456                       g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
457                     }
458                   }
459                   g3[fieldA][d][d2] *= wj;
460                 }
461                 g2[fieldA][d] *= wj;
462               }
463             }
464           } else { // mass
465             PetscReal wj = ww[jpidx_glb];
466             /* Jacobian transform - g0 */
467             for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
468               if (dim==2) {
469                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
470               } else {
471                 g0[fieldA] = wj * shift; // move this to below and remove g0
472               }
473             }
474           }
475           /* FE matrix construction */
476           {
477             PetscInt  fieldA,d,f,d2,g;
478             const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
479             /* assemble - on the diagonal (I,I) */
480             for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
481               for (f = 0; f < Nb ; f++) {
482                 const PetscInt i = fieldA*Nb + f; /* Element matrix row */
483                 for (g = 0; g < Nb; ++g) {
484                   const PetscInt j    = fieldA*Nb + g; /* Element matrix column */
485                   const PetscInt fOff = i*totDim + j;
486                   if (shift==0.0) {
487                     for (d = 0; d < dim; ++d) {
488                       elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
489                       for (d2 = 0; d2 < dim; ++d2) {
490                         elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
491                       }
492                     }
493                   } else { // mass
494                     elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
495                   }
496                 }
497               }
498             }
499           }
500         } /* qj loop */
501         if (shift==0.0) { // Jacobian
502           PetscCall(PetscLogEventEnd(ctx->events[4],0,0,0,0));
503         } else {
504           PetscCall(PetscLogEventEnd(ctx->events[16],0,0,0,0));
505         }
506 #if defined(PETSC_HAVE_THREADSAFETY)
507         endtime = MPI_Wtime();
508         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
509 #endif
510         /* assemble matrix */
511         if (!container) {
512           PetscInt cStart;
513           PetscCall(PetscLogEventBegin(ctx->events[6],0,0,0,0));
514           PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
515           PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES));
516           PetscCall(PetscLogEventEnd(ctx->events[6],0,0,0,0));
517         } else {  // GPU like assembly for debugging
518           PetscInt      fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
519           PetscScalar   vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
520           LandauIdx     *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
521           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
522           for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
523             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
524             for (f = 0; f < Nb ; f++) {
525               PetscInt idx = Idxs[f];
526               if (idx >= 0) {
527                 nr           = 1;
528                 rows0[0]     = idx;
529                 row_scale[0] = 1.;
530               } else {
531                 idx = -idx - 1;
532                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
533                   if (maps[grid].c_maps[idx][q].gid < 0) break;
534                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
535                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
536                 }
537               }
538               for (g = 0; g < Nb; ++g) {
539                 idx = Idxs[g];
540                 if (idx >= 0) {
541                   nc = 1;
542                   cols0[0]     = idx;
543                   col_scale[0] = 1.;
544                 } else {
545                   idx = -idx - 1;
546                   nc = maps[grid].num_face;
547                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
548                     if (maps[grid].c_maps[idx][q].gid < 0) break;
549                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
550                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
551                   }
552                 }
553                 const PetscInt    i   = fieldA*Nb + f; /* Element matrix row */
554                 const PetscInt    j   = fieldA*Nb + g; /* Element matrix column */
555                 const PetscScalar Aij = elemMat[i*totDim + j];
556                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
557                   const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
558                   const int idx0   = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
559                   for (int q = 0, idx2 = idx0; q < nr; q++) {
560                     for (int d = 0; d < nc; d++, idx2++) {
561                       coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij;
562                     }
563                   }
564                 } else {
565                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
566                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
567                   for (q = 0; q < nr; q++) {
568                     for (d = 0; d < nc; d++) {
569                       vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
570                     }
571                   }
572                   PetscCall(MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES));
573                 }
574               }
575             }
576           }
577         }
578         if (loc_elem==-1) {
579           PetscCall(PetscPrintf(ctx->comm,"CPU Element matrix\n"));
580           for (int d = 0; d < totDim; ++d) {
581             for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm," %12.5e",  (double)PetscRealPart(elemMat[d*totDim + f])));
582             PetscCall(PetscPrintf(ctx->comm,"\n"));
583           }
584           exit(12);
585         }
586         PetscCall(PetscFree(elemMat));
587       } /* grid */
588     } /* outer element & batch loop */
589     if (shift==0.0) { // Jacobian: free the f and df work arrays
590       PetscCall(PetscFree4(ff, dudx, dudy, dudz));
591     }
592     if (!container) {   // 'CPU' assembly: move the per-grid sub-matrices into the global JacP
593       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
594         for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
595           const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
596           PetscInt          nloc, nzl, colbuf[1024], row;
597           const PetscInt    *cols;
598           const PetscScalar *vals;
599           Mat               B = subJ[ LAND_PACK_IDX(b_id,grid) ];
600           PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
601           PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
602           PetscCall(MatGetSize(B, &nloc, NULL));
603           for (int i=0 ; i<nloc ; i++) {
604             PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
605             PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl);
606             for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
607             row  = moffset + i;
608             PetscCall(MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES));
609             PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
610           }
611           PetscCall(MatDestroy(&B));
612         }
613       }
614     }
615     if (coo_vals) {
616       PetscCall(MatSetValuesCOO(JacP,coo_vals,ADD_VALUES));
617       PetscCall(PetscFree(coo_vals));
618     }
619   } /* CPU version */
620   PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
621   PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
622   /* clean up */
623   if (cellClosure) PetscCall(PetscFree(cellClosure));
624   if (xdata) { // restore from the vector the array was actually taken from
625     PetscCall(VecRestoreArrayReadAndMemType(ctx->jacobian_field_major_order ? ctx->work_vec : a_X,&xdata));
626   }
627   PetscFunctionReturn(0);
628 }
629 
630 #if defined(LANDAU_ADD_BCS)
631 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
632                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
633                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
634                     PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
635 {
636   uexact[0] = 0;
637 }
638 #endif
639 
640 #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
641 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
642                           PetscReal *outX, PetscReal *outY)
643 {
644   PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
645   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
646     *outX = x; *outY = y;
647   } else {
648     const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
649     PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
650     if (num_sections==2) {
651       rotcos  = 0.70710678118654;
652       outfact = 1.5; efact = 2.5;
653       /* rotate normalized vector into [-pi/4,pi/4) */
654       if (sinphi >= 0.) {         /* top cell, rotate by -pi/4 */
655         cth = 0.707106781186548; sth = -0.707106781186548;
656       } else {                    /* bottom cell, rotate by +pi/4 */
657         cth = 0.707106781186548; sth = .707106781186548;
658       }
659     } else if (num_sections==3) {
660       rotcos  = 0.86602540378443;
661       outfact = 1.5; efact = 2.5;
662       /* rotate normalized vector into [-pi/6,pi/6) */
663       if (sinphi >= 0.5) {         /* top cell, -pi/3 */
664         cth = 0.5; sth = -0.866025403784439;
665       } else if (sinphi >= -.5) {  /* mid cell 0 */
666         cth = 1.; sth = .0;
667       } else { /* bottom cell +pi/3 */
668         cth = 0.5; sth = 0.866025403784439;
669       }
670     } else if (num_sections==4) {
671       rotcos  = 0.9238795325112;
672       outfact = 1.5; efact = 3;
673       /* rotate normalized vector into [-pi/8,pi/8) */
674       if (sinphi >= 0.707106781186548) {         /* top cell, -3pi/8 */
675         cth = 0.38268343236509;  sth = -0.923879532511287;
676       } else if (sinphi >= 0.) {                 /* mid top cell -pi/8 */
677         cth = 0.923879532511287; sth = -.38268343236509;
678       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
679         cth = 0.923879532511287; sth = 0.38268343236509;
680       } else {                                   /* bottom cell + 3pi/8 */
681         cth = 0.38268343236509;  sth = .923879532511287;
682       }
683     } else {
684       cth = 0.; sth = 0.; rotcos = 0; efact = 0;
685     }
686     Rth[0][0] = cth; Rth[0][1] =-sth;
687     Rth[1][0] = sth; Rth[1][1] = cth;
688     MATVEC2(Rth,xy,xyprime);
689     if (num_sections==2) {
690       newrr = xyprime[0]/rotcos;
691     } else {
692       PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
693       PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
694       newrr = rin + routfrac*nroutmax;
695     }
696     *outX = cosphi*newrr; *outY = sinphi*newrr;
697     /* grade */
698     PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
699     if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
700     else {         rs = r1; re = r2; fact = efact;} /* electron zone */
701     tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
702     *outX *= tt;
703     *outY *= tt;
704   }
705 }
706 
707 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
708 {
709   LandauCtx   *ctx = (LandauCtx*)a_ctx;
710   PetscReal   r = abc[0], z = abc[1];
  PetscFunctionBegin;
711   if (ctx->inflate) {
712     PetscReal absR, absZ;
713     absR = PetscAbs(r);
714     absZ = PetscAbs(z);
715     CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
716     r = (r > 0) ? absR : -absR;
717     z = (z > 0) ? absZ : -absZ;
718   }
719   xyz[0] = r;
720   xyz[1] = z;
721   if (dim==3) xyz[2] = abc[2];
722 
723   PetscFunctionReturn(0);
724 }
725 
726 /* create DMComposite of meshes for each species group */
727 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
728 {
729   PetscFunctionBegin;
730   { /* p4est, quads */
731     /* Create plex mesh of Landau domain */
732     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
733       PetscReal radius = ctx->radius[grid];
734       if (!ctx->sphere) {
735         PetscInt       cells[] = {2,2,2};
736         PetscReal      lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
737         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
738         if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
739         PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here
740         PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */
741         if (dim==3) PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "cube"));
742         else PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane"));
743       } else if (dim==2) { // sphere is all wrong. should just have one inner radius
744         PetscInt       numCells,cells[16][4],i,j;
745         PetscInt       numVerts;
746         PetscReal      inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
747         PetscReal      *flatCoords   = NULL;
748         PetscInt       *flatCells    = NULL, *pcell;
749         if (ctx->num_sections==2) {
750 #if 1
751           numCells = 5;
752           numVerts = 10;
753           int cells2[][4] = { {0,1,4,3},
754                               {1,2,5,4},
755                               {3,4,7,6},
756                               {4,5,8,7},
757                               {6,7,8,9} };
758           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
759           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
760           {
761             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
762             for (j = 0; j < numVerts-1; j++) {
763               PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
764               PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
765               z = rad * PetscSinReal(theta);
766               coords[j][1] = z;
767               r = rad * PetscCosReal(theta);
768               coords[j][0] = r;
769             }
770             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
771           }
772 #else
773           numCells = 4;
774           numVerts = 8;
775           static int     cells2[][4] = {{0,1,2,3},
776                                         {4,5,1,0},
777                                         {5,6,2,1},
778                                         {6,7,3,2}};
779           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
780           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
781           {
782             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
783             PetscInt j;
784             for (j = 0; j < 8; j++) {
785               PetscReal z, r;
786               PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
787               PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
788               z = rad * PetscSinReal(theta);
789               coords[j][1] = z;
790               r = rad * PetscCosReal(theta);
791               coords[j][0] = r;
792             }
793           }
794 #endif
795         } else if (ctx->num_sections==3) {
796           numCells = 7;
797           numVerts = 12;
798           int cells2[][4] = { {0,1,5,4},
799                               {1,2,6,5},
800                               {2,3,7,6},
801                               {4,5,9,8},
802                               {5,6,10,9},
803                               {6,7,11,10},
804                               {8,9,10,11} };
805           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
806           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
807           {
808             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
809             for (j = 0; j < numVerts; j++) {
810               PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
811               PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
812               z = rad * PetscSinReal(theta);
813               coords[j][1] = z;
814               r = rad * PetscCosReal(theta);
815               coords[j][0] = r;
816             }
817           }
818         } else if (ctx->num_sections==4) {
819           numCells = 10;
820           numVerts = 16;
821           int cells2[][4] = { {0,1,6,5},
822                               {1,2,7,6},
823                               {2,3,8,7},
824                               {3,4,9,8},
825                               {5,6,11,10},
826                               {6,7,12,11},
827                               {7,8,13,12},
828                               {8,9,14,13},
829                               {10,11,12,15},
830                               {12,13,14,15}};
831           for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
832           PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells));
833           {
834             PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
835             for (j = 0; j < numVerts-1; j++) {
836               PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
837               PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
838               z = rad * PetscSinReal(theta);
839               coords[j][1] = z;
840               r = rad * PetscCosReal(theta);
841               coords[j][0] = r;
842             }
843             coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
844           }
845         } else {
846           numCells = 0;
847           numVerts = 0;
848         }
849         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
850           pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
851           pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
852         }
853         PetscCall(DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]));
854         PetscCall(PetscFree2(flatCoords,flatCells));
855         PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle"));
856       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support cubed sphere");
857 
858       PetscCall(DMSetFromOptions(ctx->plex[grid]));
859     } // grid loop
860     PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack,prefix));
861 
862     { /* convert to p4est (or whatever), wait for discretization to create pack */
863       char           convType[256];
864       PetscBool      flg;
865 
866       PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
867       PetscCall(PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg));
868       PetscOptionsEnd();
869       if (flg) {
870         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
871         for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
872           DM dmforest;
873           PetscCall(DMConvert(ctx->plex[grid],convType,&dmforest));
874           if (dmforest) {
875             PetscBool isForest;
876             PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix));
877             PetscCall(DMIsForest(dmforest,&isForest));
878             if (isForest) {
879               if (ctx->sphere && ctx->inflate) {
880                 PetscCall(DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx));
881               }
882               PetscCall(DMDestroy(&ctx->plex[grid]));
883               ctx->plex[grid] = dmforest; // Forest for adaptivity
884             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
885           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
886         }
887       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
888     }
889   } /* non-file */
890   PetscCall(DMSetDimension(pack, dim));
891   PetscCall(PetscObjectSetName((PetscObject) pack, "Mesh"));
892   PetscCall(DMSetApplicationContext(pack, ctx));
893 
894   PetscFunctionReturn(0);
895 }
896 
897 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
898 {
899   PetscInt        ii,i0;
900   char            buf[256];
901   PetscSection    section;
902 
903   PetscFunctionBegin;
904   for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
905     if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e"));
906     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii));
907     /* Setup Discretization - FEM */
908     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]));
909     PetscCall(PetscObjectSetName((PetscObject) ctx->fe[ii], buf));
910     PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]));
911   }
912   PetscCall(DMCreateDS(ctx->plex[grid]));
913   PetscCall(DMGetSection(ctx->plex[grid], &section));
914   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
915     if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se"));
916     else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii));
917     PetscCall(PetscSectionSetComponentName(section, i0, 0, buf));
918   }
919   PetscFunctionReturn(0);
920 }
921 
922 /* Define a Maxwellian function for testing out the operator. */
923 
924 /* Using cartesian velocity space coordinates, the particle */
925 /* density, [1/m^3], is defined according to */
926 
927 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
928 
929 /* Using some constant, c, we normalize the velocity vector into a */
930 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
931 
932 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
933 
934 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
935 /* for finding the particle within the interval in a box dx^3 around x is */
936 
937 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
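
/* In the code below theta = 2*kT_m/v_0^2, with kT_m = kT/m and v_0 the normalizing speed set in
   DMPlexLandauAddMaxwellians(), i.e. the definition theta = 2T/(m c^2) above with c = v_0. */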
938 
939 typedef struct {
940   PetscReal v_0;
941   PetscReal kT_m;
942   PetscReal n;
943   PetscReal shift;
944 } MaxwellianCtx;
945 
946 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
947 {
948   MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
949   PetscInt      i;
950   PetscReal     v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
951   PetscFunctionBegin;
952   /* compute the exponents, v^2 */
953   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
954   /* evaluate the Maxwellian */
955   u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
956   if (mctx->shift!=0.) {
957     v2 = 0;
958     for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
959     v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
960     /* evaluate the shifted Maxwellian */
961     u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
962   }
963   PetscFunctionReturn(0);
964 }
965 
966 /*@
967  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state
968 
969  Collective on X
970 
971  Input Parameters:
972  +   dm - The mesh (local)
973  .   time - Current time
974  .   temps - Temperatures of each species (global)
975  .   ns - Number density of each species (global)
976  .   grid - index of the current grid - just used for offset into temps and ns
977  .   b_id - batch index
978  .   n_batch - number of batches
979  -   actx - Landau context
980 
981  Output Parameter:
982  .   X  - The state (local to this grid)
983 
984  Level: beginner
985 
986  .keywords: mesh
987  .seealso: `DMPlexLandauCreateVelocitySpace()`
988  @*/
989 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
990 {
991   LandauCtx      *ctx = (LandauCtx*)actx;
992   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
993   PetscInt       dim;
994   MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
995 
996   PetscFunctionBegin;
997   PetscCall(DMGetDimension(dm, &dim));
998   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
999   for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1000     mctxs[i0]      = &data[i0];
1001     data[i0].v_0   = ctx->v_0; // v_0 same for all grids
1002     data[i0].kT_m  = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1003     data[i0].n     = ns[ii] * (1+0.1*(double)b_id/(double)n_batch); // ramp density up 10% to mimic application; n[0] used for Connor-Hastie
1004     initu[i0]      = maxwellian;
1005     data[i0].shift = 0;
1006   }
1007   data[0].shift = ctx->electronShift;
1008   /* need to make ADD_ALL_VALUES work - TODO */
1009   PetscCall(DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X));
1010   PetscFunctionReturn(0);
1011 }
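
/* A minimal usage sketch (illustrative only, not compiled here): given a per-grid plex DM and a
   vector X on it, add Maxwellians for batch member b_id of n_batch,

     PetscCall(VecZeroEntries(X));
     PetscCall(DMPlexLandauAddMaxwellians(ctx->plex[grid], X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));

   which is essentially what LandauSetInitialCondition() below does for the initial state. */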
1012 
1013 /*
1014  LandauSetInitialCondition - Adds Maxwellians with context
1015 
1016  Collective on X
1017 
1018  +   dm - The mesh
1019  .   grid - index of the current grid - just used for offset into temps and ns
1020  .   b_id - batch index
1021  .   n_batch - number of batches
1022  -   actx - Landau context with T and n
1023  +   actx - Landau context with T and n
1024 
1025  Output Parameter:
1026  .   X  - The state
1027 
1028  Level: beginner
1029 
1030  .keywords: mesh
1031  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
1032  */
1033 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
1034 {
1035   LandauCtx        *ctx = (LandauCtx*)actx;
1036   PetscFunctionBegin;
1037   if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
1038   PetscCall(VecZeroEntries(X));
1039   PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
1040   PetscFunctionReturn(0);
1041 }
1042 
1043 // adapt a level once. Forest in/out
1044 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1045 {
1046   DM               forest, plex, adaptedDM = NULL;
1047   PetscDS          prob;
1048   PetscBool        isForest;
1049   PetscQuadrature  quad;
1050   PetscInt         Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1051   DMLabel          adaptLabel = NULL;
1052 
1053   PetscFunctionBegin;
1054   forest = ctx->plex[grid];
1055   PetscCall(DMCreateDS(forest));
1056   PetscCall(DMGetDS(forest, &prob));
1057   PetscCall(DMGetDimension(forest, &dim));
1058   PetscCall(DMIsForest(forest, &isForest));
1059   PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"! Forest");
1060   PetscCall(DMConvert(forest, DMPLEX, &plex));
1061   PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1062   PetscCall(DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel));
1063   PetscCall(PetscFEGetQuadrature(fem, &quad));
1064   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
1065   PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
1066   PetscCall(PetscDSGetDimensions(prob, &Nb));
1067   if (type==4) {
1068     for (c = cStart; c < cEnd; c++) {
1069       PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1070     }
1071     PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM"));
1072   } else if (type==2) {
1073     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1074     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1075     for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1076     for (c = cStart; c < cEnd; c++) {
1077       PetscReal    tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1078       PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ));
1079       for (qj = 0; qj < Nq; ++qj) {
1080         tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1081         r  = PetscSqrtReal(tt);
1082         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1083           minRad = r;
1084           nr     = 0;
1085           rCellIdx[nr++]= c;
1086           PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", (double)r, c, qj+1, Nq));
1087         } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1088           for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1089           if (k==nr) {
1090             rCellIdx[nr++]= c;
1091             PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", (double)r, c, qj+1, Nq, (double)(r-minRad)));
1092           }
1093         }
1094         if (ctx->sphere) {
1095           if ((tt=r-ctx->e_radius) > 0) {
1096             PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,(double)tt));
1097             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1098               eMinRad = tt;
1099               eMaxIdx = 0;
1100               eCellIdx[eMaxIdx++] = c;
1101             } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1102               eCellIdx[eMaxIdx++] = c;
1103             }
1104           }
1105           if ((tt=r-ctx->i_radius[grid]) > 0) {
1106             if (tt < iMinRad - 1.e-5) {
1107               iMinRad = tt;
1108               iMaxIdx = 0;
1109               iCellIdx[iMaxIdx++] = c;
1110             } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1111               iCellIdx[iMaxIdx++] = c;
1112             }
1113           }
1114         }
1115       }
1116     }
1117     for (k=0;k<nr;k++) {
1118       PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
1119     }
1120     if (ctx->sphere) {
1121       for (c = 0; c < eMaxIdx; c++) {
1122         PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE));
1123         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],(double)eMinRad));
1124       }
1125       for (c = 0; c < iMaxIdx; c++) {
1126         PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE));
1127         PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],(double)iMinRad));
1128       }
1129     }
1130     PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],(double)minRad));
1131   } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1132     PetscScalar  *coef = NULL;
1133     Vec          coords;
1134     PetscInt     csize,Nv,d,nz;
1135     DM           cdm;
1136     PetscSection cs;
1137     PetscCall(DMGetCoordinatesLocal(forest, &coords));
1138     PetscCall(DMGetCoordinateDM(forest, &cdm));
1139     PetscCall(DMGetLocalSection(cdm, &cs));
1140     for (c = cStart; c < cEnd; c++) {
1141       PetscInt doit = 0, outside = 0;
1142       PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
1143       Nv = csize/dim;
1144       for (nz = d = 0; d < Nv; d++) {
1145         PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1146         x = PetscSqrtReal(x);
1147         if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;             /* refine origin */
1148         else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++;   /* first pass don't refine bottom */
1149         else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1150         else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1151         if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1152       }
1153       PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
1154       if (doit || (outside<Nv && nz)) {
1155         PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
1156       }
1157     }
1158     PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM"));
1159   }
1160   PetscCall(DMDestroy(&plex));
1161   PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
1162   PetscCall(DMLabelDestroy(&adaptLabel));
1163   *newForest = adaptedDM;
1164   if (adaptedDM) {
1165     if (isForest) {
1166       PetscCall(DMForestSetAdaptivityForest(adaptedDM,NULL)); // detach the adapted forest from its pre-adaptation forest
1167     } else exit(33); // should not happen: the input DM was checked to be a forest above
1168     PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
1169     PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd));
1170     PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart)));
1171     PetscCall(DMDestroy(&plex));
1172   } else *newForest = NULL;
1173   PetscFunctionReturn(0);
1174 }
1175 
1176 // forest goes in (ctx->plex[grid]), plex comes out
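// Applies the five refinement passes in order (RE refine, z-refine 1, origin AMR, z-refine 2, post-AMR uniform),
// re-creating the solution vector and re-setting the initial condition after each successful adaptation.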
1177 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1178 {
1179   PetscInt        adaptIter;
1180 
1181   PetscFunctionBegin;
1182   PetscInt  type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
1183   for (type=0;type<5;type++) {
1184     for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1185       DM  newForest = NULL;
1186       PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
1187       if (newForest)  {
1188         PetscCall(DMDestroy(&ctx->plex[grid]));
1189         PetscCall(VecDestroy(uu));
1190         PetscCall(DMCreateGlobalVector(newForest,uu));
1191         PetscCall(PetscObjectSetName((PetscObject) *uu, "uAMR"));
1192         PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
1193         ctx->plex[grid] = newForest;
1194       } else {
1195         exit(4); // can happen when AMR is disabled but post refinement is requested
1196       }
1197     }
1198   }
1199   PetscFunctionReturn(0);
1200 }
1201 
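/*
  Illustrative command line (values are examples only, not defaults) for a two-grid electron + deuterium run
  using the options registered below:
    -dm_landau_thermal_temps 2,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1 -dm_landau_n 1,1
    -dm_landau_num_species_grid 1,1 -dm_landau_amr_levels_max 3,3 -dm_landau_device_type cpu
*/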
1202 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1203 {
1204   PetscBool         flg, sph_flg;
1205   PetscInt          ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1206   PetscReal         v0_grid[LANDAU_MAX_GRIDS];
1207   DM                dummy;
1208 
1209   PetscFunctionBegin;
1210   PetscCall(DMCreate(ctx->comm,&dummy));
1211   /* get options - initialize context */
1212   ctx->verbose = 1; // should default to 0 to be quiet, like other PETSc components
1213 #if defined(PETSC_HAVE_THREADSAFETY)
1214   ctx->batch_sz = PetscNumOMPThreads;
1215 #else
1216   ctx->batch_sz = 1;
1217 #endif
1218   ctx->batch_view_idx = 0;
1219   ctx->interpolate    = PETSC_TRUE;
1220   ctx->gpu_assembly   = PETSC_TRUE;
1221   ctx->norm_state     = 0;
1222   ctx->electronShift  = 0;
1223   ctx->M              = NULL;
1224   ctx->J              = NULL;
1225   /* geometry and grids */
1226   ctx->sphere         = PETSC_FALSE;
1227   ctx->inflate        = PETSC_FALSE;
1228   ctx->use_p4est      = PETSC_FALSE;
1229   ctx->num_sections   = 3; /* 2, 3 or 4 */
1230   for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1231     ctx->radius[grid]           = 5.; /* thermal radius (velocity) */
1232     ctx->numAMRRefine[grid]     = 5;
1233     ctx->postAMRRefine[grid]    = 0;
1234     ctx->species_offset[grid+1] = 1; // one species default
1235     num_species_grid[grid]      = 0;
1236     ctx->plex[grid] = NULL;     /* cache as expensive to Convert */
1237   }
1238   ctx->species_offset[0] = 0;
1239   ctx->re_radius         = 0.;
1240   ctx->vperp0_radius1    = 0;
1241   ctx->vperp0_radius2    = 0;
1242   ctx->nZRefine1         = 0;
1243   ctx->nZRefine2         = 0;
1244   ctx->numRERefine       = 0;
1245   num_species_grid[0]    = 1; // one species default
1246   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1247   ctx->charges[0]        = -1;  /* electron charge (MKS) */
1248   ctx->masses[0]         = 1/1835.469965278441013; /* temporary value in units of proton mass */
1249   ctx->n[0]              = 1;
1250   ctx->v_0               = 1; /* thermal velocity, we could start with a scale != 1 */
1251   ctx->thermal_temps[0]  = 1;
1252   /* constants, etc. */
1253   ctx->epsilon0          = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1254   ctx->k                 = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1255   ctx->lnLam             = 10;         /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
1256   ctx->n_0               = 1.e20;        /* typical plasma n, but could set it to 1 */
1257   ctx->Ez                = 0;
1258   for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
1259   ctx->use_matrix_mass   =  PETSC_FALSE;
1260   ctx->use_relativistic_corrections = PETSC_FALSE;
1261   ctx->use_energy_tensor_trick      = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1262   ctx->SData_d.w         = NULL;
1263   ctx->SData_d.x         = NULL;
1264   ctx->SData_d.y         = NULL;
1265   ctx->SData_d.z         = NULL;
1266   ctx->SData_d.invJ      = NULL;
1267   ctx->jacobian_field_major_order     = PETSC_FALSE;
1268   ctx->SData_d.coo_elem_offsets       = NULL;
1269   ctx->SData_d.coo_elem_point_offsets = NULL;
1270   ctx->coo_assembly                   = PETSC_FALSE;
1271   ctx->SData_d.coo_elem_fullNb        = NULL;
1272   ctx->SData_d.coo_size               = 0;
1273   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1274   {
1275     char opstring[256];
1276 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1277     ctx->deviceType = LANDAU_KOKKOS;
1278     PetscCall(PetscStrcpy(opstring,"kokkos"));
1279 #elif defined(PETSC_HAVE_CUDA)
1280     ctx->deviceType = LANDAU_CUDA;
1281     PetscCall(PetscStrcpy(opstring,"cuda"));
1282 #else
1283     ctx->deviceType = LANDAU_CPU;
1284     PetscCall(PetscStrcpy(opstring,"cpu"));
1285 #endif
1286     PetscCall(PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL));
1287     PetscCall(PetscStrcmp("cpu",opstring,&flg));
1288     if (flg) {
1289       ctx->deviceType = LANDAU_CPU;
1290     } else {
1291       PetscCall(PetscStrcmp("cuda",opstring,&flg));
1292       if (flg) {
1293         ctx->deviceType = LANDAU_CUDA;
1294       } else {
1295         PetscCall(PetscStrcmp("kokkos",opstring,&flg));
1296         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1297         else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1298       }
1299     }
1300   }
1301   PetscCall(PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL));
1302   PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
1303   PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
1304   PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,(PetscInt)LANDAU_MAX_BATCH_SZ,ctx->batch_sz);
1305   PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
1306   PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz);
1307   PetscCall(PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in unites of Conner-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL));
1308   PetscCall(PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL));
1309   PetscCall(PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL));
1310   PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
1311   PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
1312   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL));
1313 
1314   /* get num species with temperature, set defaults */
1315   for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1316     ctx->thermal_temps[ii] = 1;
1317     ctx->charges[ii]       = 1;
1318     ctx->masses[ii]        = 1;
1319     ctx->n[ii]             = 1;
1320   }
1321   nt = LANDAU_MAX_SPECIES;
1322   PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
1323   if (flg) {
1324     PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt));
1325     ctx->num_species = nt;
1326   } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1327   for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1328   nm = LANDAU_MAX_SPECIES-1;
1329   PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg));
1330   if (flg && nm != ctx->num_species-1) {
1331     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species-1);
1332   }
1333   nm = LANDAU_MAX_SPECIES;
1334   PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg));
1335   PetscCheck(!flg || nm == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species);
1336   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1337   ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1338   ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1339   nc = LANDAU_MAX_SPECIES-1;
1340   PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg));
1341   if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1);
1342   for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1343   /* geometry and grids */
1344   nt = LANDAU_MAX_GRIDS;
1345   PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg));
1346   if (flg) {
1347     ctx->num_grids = nt;
1348     for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1349     PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS);
1350   } else {
1351     ctx->num_grids = 1; // go back to a single grid run
1352     num_species_grid[0] = ctx->num_species;
1353   }
1354   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
1355   PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species);
1356   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1357     int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1358     v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1359   }
1360   ii = 0;
1361   PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL));
1362   ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1363   ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
1364   /* domain */
1365   nt = LANDAU_MAX_GRIDS;
1366   PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg));
1367   if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1368   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1369     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1370       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1371       else ctx->radius[grid] = -ctx->radius[grid];
1372       ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1373       PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",(double)ctx->radius[grid],grid));
1374     }
1375     ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1376   }
1377   /* AMR parameters */
1378   nt = LANDAU_MAX_GRIDS;
1379   PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg));
1380   PetscCheck(!flg || nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids);
1381   nt = LANDAU_MAX_GRIDS;
1382   PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg));
1383   for (ii=1;ii<ctx->num_grids;ii++)  ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1384   PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg));
1385   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg));
1386   PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2",  "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg));
1387   PetscCall(PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg));
1388   PetscCall(PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg));
1389   PetscCall(PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg));
1390   /* spherical domain (not used) */
1391   PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential section in (2D) grid, 2, 3, of 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL));
1392   PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg));
1393   PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg));
1394   PetscCall(PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg));
1395   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an electron radius was given without -dm_landau_sphere; assume the sphere domain was intended */
1396   if (!flg) {
1397     ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1398   }
1399   nt = LANDAU_MAX_GRIDS;
1400   PetscCall(PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg));
1401   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1402   if (!flg) {
1403     ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1404   }
1405   if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_grids = %" PetscInt_FMT,nt,ctx->num_grids);
1406   if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",(double)ctx->i_radius[0],(double)ctx->e_radius,(double)ctx->radius[0]);
1407   /* processing options */
1408   PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL));
1409   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is offered for the CPU and Kokkos back-ends
1410     PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL));
1411     if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d",ctx->coo_assembly);
1412   }
1413   PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL));
1414   if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
1415   PetscCheck(!ctx->jacobian_field_major_order,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order DEPRECATED");
1416   PetscOptionsEnd();
1417 
1418   for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii]  = ctx->charges[ii] = 0;
1419   if (ctx->verbose > 0) {
1420     PetscCall(PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n",(double)ctx->masses[0],(double)(ctx->masses[1]/1.6720e-27),(double)(ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0)));
1421     PetscCall(PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0],(double)(-ctx->charges[1]/ctx->charges[0]),(double)(ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0)));
1422     PetscCall(PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0],(double)ctx->n[1],(double)(ctx->num_species>2 ? ctx->n[2] : 0)));
1423     PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", (double)ctx->thermal_temps[0], (double)ctx->thermal_temps[1], (double)((ctx->num_species>2) ? ctx->thermal_temps[2] : 0), (double)ctx->v_0, (double)(ctx->v_0/SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz));
1424     PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %d: %10.3e (%" PetscInt_FMT ") ",0,(double)ctx->radius[0],ctx->numAMRRefine[0]));
1425     for (ii=1;ii<ctx->num_grids;ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,(double)ctx->radius[ii],ctx->numAMRRefine[ii]));
1426     PetscCall(PetscPrintf(ctx->comm,"\n"));
1427     if (ctx->jacobian_field_major_order) {
1428       PetscCall(PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n"));
1429     } else {
1430       PetscCall(PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n"));
1431     }
1432   }
1433   PetscCall(DMDestroy(&dummy));
1434   {
1435     PetscMPIInt    rank;
1436     PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank));
1437     ctx->stage = 0;
1438     PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */
1439     PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */
1440     PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */
1441     PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */
1442     PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */
1443     PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */
1444     PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */
1445     PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */
1446     PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */
1447     PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */
1448     PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */
1449     PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */
1450     PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */
1451     PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */
1452     PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */
1453     PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */
1454     PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */
1455 
1456     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1457       PetscCall(PetscOptionsClearValue(NULL,"-snes_converged_reason"));
1458       PetscCall(PetscOptionsClearValue(NULL,"-ksp_converged_reason"));
1459       PetscCall(PetscOptionsClearValue(NULL,"-snes_monitor"));
1460       PetscCall(PetscOptionsClearValue(NULL,"-ksp_monitor"));
1461       PetscCall(PetscOptionsClearValue(NULL,"-ts_monitor"));
1462       PetscCall(PetscOptionsClearValue(NULL,"-ts_view"));
1463       PetscCall(PetscOptionsClearValue(NULL,"-ts_adapt_monitor"));
1464       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view"));
1465       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view"));
1466       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view"));
1467       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_view"));
1468       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view"));
1469       PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mat_view"));
1470       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason"));
1471       PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor"));
1472       PetscCall(PetscOptionsClearValue(NULL,"-"));
1473       PetscCall(PetscOptionsClearValue(NULL,"-info"));
1474     }
1475   }
1476   PetscFunctionReturn(0);
1477 }
1478 
1479 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1480 {
1481   PetscSection      section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
1482   PetscQuadrature   quad;
1483   const PetscReal   *quadWeights;
1484   PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0, MAP_BF_SIZE = 64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES;
1485   PetscTabulation   *Tf;
1486   PetscDS           prob;
1487 
1488   PetscFunctionBegin;
1489   PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
1490   PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
1491   /* DS, Tab and quad is same on all grids */
1492   PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1493   PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
1494   PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL,  &quadWeights));
1495   PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ);
1496   /* setup each grid */
1497   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1498     PetscInt cStart, cEnd;
1499     PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
1500     PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1501     numCells[grid] = cEnd - cStart; // grids can have different topology
1502     PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
1503     PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
1504     PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
1505     ncellsTot += numCells[grid];
1506   }
1507   /* create GPU assembly data */
1508   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1509     PetscContainer          container;
1510     PetscScalar             *elemMatrix, *elMat;
1511     pointInterpolationP4est (*pointMaps)[LANDAU_MAX_Q_FACE];
1512     P4estVertexMaps         *maps;
1513     const PetscInt          *plex_batch=NULL,Nb=Nq,elMatSz=Nq*Nq*ctx->num_species*ctx->num_species; // tensor elements;
1514     LandauIdx               *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL;
1515     /* create GPU assembly data */
1516     PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1));
1517     PetscCall(PetscLogEventBegin(ctx->events[2],0,0,0,0));
1518     PetscCall(PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps));
1519     PetscCall(PetscMalloc(sizeof(*pointMaps)*MAP_BF_SIZE, &pointMaps));
1520     PetscCall(PetscMalloc(sizeof(*elemMatrix)*elMatSz, &elemMatrix));
1521 
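    // COO assembly metadata, filled by the loops below:
    //   coo_elem_offsets[e+1]     - running total of COO entries contributed through global element e (all species blocks)
    //   coo_elem_fullNb[e]        - number of distinct point contributions (vertices plus constrained points) per field in element e
    //   coo_elem_point_offsets[e] - per-basis-function prefix of point contributions within element e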
1522     if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d
1523       PetscCall(PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets)); // array of integer pointers
1524       coo_elem_offsets[0] = 0; // finish later
1525       PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot));
1526       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1527       ctx->SData_d.coo_elem_offsets       = (void*)coo_elem_offsets;
1528       ctx->SData_d.coo_elem_fullNb        = (void*)coo_elem_fullNb;
1529       ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets;
1530     } else {
1531       ctx->SData_d.coo_elem_offsets       = ctx->SData_d.coo_elem_fullNb = NULL;
1532       ctx->SData_d.coo_elem_point_offsets = NULL;
1533       ctx->SData_d.coo_n_cellsTot         = 0;
1534     }
1535 
1536     ctx->SData_d.coo_max_fullnb = 0;
1537     for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1538       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq;
1539       if (grid_batch_is_inv[grid]) {
1540         PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch));
1541       }
1542       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1543       // make maps
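      // maps[grid].gIdx[elem][field][q] holds the global index that basis function q of this element maps to;
      // a negative value encodes a constrained (hanging) point as -(idx+1), whose interpolation weights live in c_maps[idx]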
1544       maps[grid].d_self       = NULL;
1545       maps[grid].num_elements = numCells[grid];
1546       maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q = Nq^(1/dim), the 1D quadrature size
1547       maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^(dim-1), number of quadrature points on a face
1548       maps[grid].num_reduced  = 0;
1549       maps[grid].deviceType   = ctx->deviceType;
1550       maps[grid].numgrids     = ctx->num_grids;
1551       // count reduced and get
1552       PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx));
1553       for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1554         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1555         for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1556           int fullNb = 0;
1557           for (int q = 0; q < Nb; ++q) {
1558             PetscInt    numindices,*indices;
1559             PetscScalar *valuesOrig = elMat = elemMatrix;
1560             PetscCall(PetscArrayzero(elMat, totDim*totDim));
1561             elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
1562             PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1563             for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
1564               if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
1565                 // found it
1566                 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1567                   if (plex_batch) {
1568                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]];
1569                   } else {
1570                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1571                   }
1572                   fullNb++;
1573                 } else { //found a constraint
1574                   int       jj      = 0;
1575                   PetscReal sum     = 0;
1576                   const PetscInt ff = f;
1577                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
1578 
1579                   do {  // constraints are continuous in Plex - exploit that here
1580                     int ii; // get 'scale'
1581                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1582                       if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1583                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
1584                       }
1585                     }
1586                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1587                     // get 'gid'
1588                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1589                     else {
1590                       if (plex_batch) {
1591                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1592                       } else {
1593                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1594                       }
1595                       fullNb++;
1596                     }
1597                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1598                   while (jj < maps[grid].num_face) {
1599                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1600                     pointMaps[maps[grid].num_reduced][jj].gid = -1;
1601                     jj++;
1602                   }
1603                   if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
1604                     int       d,f;
1605                     PetscReal tmp = 0;
1606                     PetscCall(PetscPrintf(PETSC_COMM_SELF,"\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n",eidx,q,fieldA,(double)sum,LANDAU_MAX_Q_FACE,maps[grid].num_face));
1607                     for (d = 0, tmp = 0; d < numindices; ++d) {
1608                       if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD,"%3d) %3" PetscInt_FMT ": ",d,indices[d]));
1609                       for (f = 0; f < numindices; ++f) {
1610                         tmp += PetscRealPart(elMat[d*numindices + f]);
1611                       }
1612                       if (tmp!=0) PetscCall(PetscPrintf(ctx->comm," | %22.16e\n",(double)tmp));
1613                     }
1614                   }
1615                   maps[grid].num_reduced++;
1616                   PetscCheck(maps[grid].num_reduced<MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %" PetscInt_FMT,maps[grid].num_reduced,MAP_BF_SIZE);
1617                 }
1618                 break;
1619               }
1620             }
1621             // cleanup
1622             PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat));
1623             if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat));
1624           }
1625           if (ctx->coo_assembly) { // setup COO assembly
1626             coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid
1627             if (fieldA==0) { // cache full Nb for this element, on this grid per species
1628               coo_elem_fullNb[glb_elem_idx] = fullNb;
1629               if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1630             } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d",coo_elem_fullNb[glb_elem_idx],fullNb);
1631           }
1632         } // field
1633       } // cell
1634       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1635       PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps));
1636       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1637         for (int q = 0; q < maps[grid].num_face; ++q) {
1638           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1639           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1640         }
1641       }
1642 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1643       if (ctx->deviceType == LANDAU_KOKKOS) {
1644         PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // implies the Kokkos back-end builds its device maps here
1645       } // else could be CUDA
1646 #endif
1647 #if defined(PETSC_HAVE_CUDA)
1648       if (ctx->deviceType == LANDAU_CUDA) {
1649         PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid));
1650       }
1651 #endif
1652       if (plex_batch) {
1653         PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch));
1654         PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this
1655       }
1656     } /* grids */
1657     // finish COO
1658     if (ctx->coo_assembly) { // setup COO assembly
1659       PetscInt *oor, *ooc;
1660       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz;
1661       PetscCall(PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc));
1662       for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1;
1663       // get
1664       for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1665         for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1666           const int              fullNb = coo_elem_fullNb[glb_elem_idx];
1667           const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should be the same for all fields, and this is only for COO storage
1668           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1669           for (int f=0, cnt2=0;f<Nb;f++) {
1670             int idx = Idxs[f];
1671             coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1672             if (idx >= 0) {
1673               cnt2++;
1674               coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1675             } else {
1676               idx = -idx - 1;
1677               for (int q = 0 ; q < maps[grid].num_face; q++) {
1678                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1679                 cnt2++;
1680                 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc
1681               }
1682             }
1683             PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d",fullNb,cnt2);
1684           }
1685           PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb);
1686         }
1687       }
1688       // set
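      // COO entry layout (matches idx0 below): batches are outermost with stride coo_elem_offsets[ncellsTot];
      // within a batch each element contributes one fullNb x fullNb block per species, addressed by the per-point offsets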
1689       for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
1690         for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) {
1691           const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
1692           for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) {
1693             const int  fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb;
1694             // set (i,j)
1695             for (int fieldA=0;fieldA<Nf[grid];fieldA++) {
1696               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1697               int                    rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
1698               for (int f = 0; f < Nb; ++f) {
1699                 const int nr =  coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f];
1700                 if (nr==1) rows[0] = Idxs[f];
1701                 else {
1702                   const int idx = -Idxs[f] - 1;
1703                   for (int q = 0; q < nr; q++) {
1704                     rows[q] = maps[grid].c_maps[idx][q].gid;
1705                   }
1706                 }
1707                 for (int g = 0; g < Nb; ++g) {
1708                   const int nc =  coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g];
1709                   if (nc==1) cols[0] = Idxs[g];
1710                   else {
1711                     const int idx = -Idxs[g] - 1;
1712                     for (int q = 0; q < nc; q++) {
1713                       cols[q] = maps[grid].c_maps[idx][q].gid;
1714                     }
1715                   }
1716                   const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1717                   for (int q = 0, idx = idx0; q < nr; q++) {
1718                     for (int d = 0; d < nc; d++, idx++) {
1719                       oor[idx] = rows[q] + moffset;
1720                       ooc[idx] = cols[d] + moffset;
1721                     }
1722                   }
1723                 }
1724               }
1725             }
1726           } // cell
1727         } // grid
1728       } // batch
1729       PetscCall(MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc));
1730       PetscCall(PetscFree2(oor,ooc));
1731     }
1732     PetscCall(PetscFree(pointMaps));
1733     PetscCall(PetscFree(elemMatrix));
1734     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
1735     PetscCall(PetscContainerSetPointer(container, (void *)maps));
1736     PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy));
1737     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container));
1738     PetscCall(PetscContainerDestroy(&container));
1739     PetscCall(PetscLogEventEnd(ctx->events[2],0,0,0,0));
1740   } // end GPU assembly
1741   { /* create static point data, Jacobian called first, only one vertex copy */
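    // static per-quadrature-point data built here: ww = quadrature weight * |J| (times r in 2D cylindrical coordinates),
    // xx/yy/zz = point coordinates (or grad(v^2/2) when the energy-tensor trick is on), invJ_a = inverse Jacobians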
1742     PetscReal      *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a;
1743     PetscInt       outer_ipidx, outer_ej,grid, nip_glb = 0;
1744     PetscFE        fe;
1745     const PetscInt Nb = Nq;
1746     PetscCall(PetscLogEventBegin(ctx->events[7],0,0,0,0));
1747     PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n"));
1748     for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid];
1749     /* collect f data, first time is for Jacobian, but make mass now */
1750     if (ctx->verbose > 0) {
1751       PetscInt ncells = 0, N;
1752       PetscCall(MatGetSize(ctx->J,&N,NULL));
1753       for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid];
1754       PetscCall(PetscPrintf(ctx->comm,"%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N));
1755     }
1756     PetscCall(PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a));
1757     if (dim==3) {
1758       PetscCall(PetscMalloc1(nip_glb,&zz));
1759     }
1760     if (ctx->use_energy_tensor_trick) {
1761       PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
1762       PetscCall(PetscObjectSetName((PetscObject) fe, "energy"));
1763     }
1764     /* init each grids static data - no batch */
1765     for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once)
1766       Vec             v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1767       PetscSection    e_section;
1768       DM              dmEnergy;
1769       PetscInt        cStart, cEnd, ej;
1770 
1771       PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
1772       // prep energy trick, get v^2 / 2 vector
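        // project v^2/2 (or gamma-1 in the relativistic case) onto a scalar FE space; its gradient is used
        // below in place of the velocity coordinates so that the discrete operator conserves energy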
1773       if (ctx->use_energy_tensor_trick) {
1774         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1775         Vec            glob_v2;
1776         PetscReal      *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
1777 
1778         PetscCall(DMClone(ctx->plex[grid], &dmEnergy));
1779         PetscCall(PetscObjectSetName((PetscObject) dmEnergy, "energy"));
1780         PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe));
1781         PetscCall(DMCreateDS(dmEnergy));
1782         PetscCall(DMGetSection(dmEnergy, &e_section));
1783         PetscCall(DMGetGlobalVector(dmEnergy,&glob_v2));
1784         PetscCall(PetscObjectSetName((PetscObject) glob_v2, "trick"));
1785         c2_0[0] = &data[0];
1786         PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2));
1787         PetscCall(DMGetLocalVector(dmEnergy, &v2_2));
1788         PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */
1789         PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1790         PetscCall(DMGlobalToLocalEnd  (dmEnergy, glob_v2, INSERT_VALUES, v2_2));
1791         PetscCall(DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view"));
1792         PetscCall(VecViewFromOptions(glob_v2,NULL, "-energy_vec_view"));
1793         PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2));
1794       }
1795       /* append part of the IP data for each grid */
1796       for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
1797         PetscScalar *coefs = NULL;
1798         PetscReal    vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1799         invJe = invJ_a + outer_ej*Nq*dim*dim;
1800         PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj));
1801         if (ctx->use_energy_tensor_trick) {
1802           PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1803         }
1804         /* create static point data */
1805         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1806           const PetscInt  gidx = outer_ipidx;
1807           const PetscReal *invJ = &invJe[qj*dim*dim];
1808           ww    [gidx] = detJj[qj] * quadWeights[qj];
1809           if (dim==2) ww    [gidx] *=              vj[qj * dim + 0];  /* cylindrical coordinate, w/o 2pi */
1810           // get xx, yy, zz
1811           if (ctx->use_energy_tensor_trick) {
1812             double                  refSpaceDer[3],eGradPhi[3];
1813             const PetscReal * const DD = Tf[0]->T[1];
1814             const PetscReal         *Dq = &DD[qj*Nb*dim];
1815             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1816             for (int b = 0; b < Nb; ++b) {
1817               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
1818             }
1819             xx[gidx] = 1e10;
1820             if (ctx->use_relativistic_corrections) {
1821               double dg2_c2 = 0;
1822               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1823               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1824               dg2_c2 *= (double)c02;
1825               if (dg2_c2 >= .999) {
1826                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1827                 yy[gidx] = vj[qj * dim + 1];
1828                 if (dim==3) zz[gidx] = vj[qj * dim + 2];
1829                 PetscCall(PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",(double)PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx]));
1830               } else {
1831                 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
1832                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1833                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1834               }
1835             }
1836             if (xx[gidx] == 1e10) {
1837               for (int d = 0; d < dim; ++d) {
1838                 for (int e = 0 ; e < dim; ++e) {
1839                   eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e];
1840                 }
1841               }
1842               xx[gidx] = eGradPhi[0];
1843               yy[gidx] = eGradPhi[1];
1844               if (dim==3) zz[gidx] = eGradPhi[2];
1845             }
1846           } else {
1847             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1848             yy[gidx] = vj[qj * dim + 1];
1849             if (dim==3) zz[gidx] = vj[qj * dim + 2];
1850           }
1851         } /* q */
1852         if (ctx->use_energy_tensor_trick) {
1853           PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs));
1854         }
1855       } /* ej */
1856       if (ctx->use_energy_tensor_trick) {
1857         PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2));
1858         PetscCall(DMDestroy(&dmEnergy));
1859       }
1860     } /* grid */
1861     if (ctx->use_energy_tensor_trick) {
1862       PetscCall(PetscFEDestroy(&fe));
1863     }
1864     /* cache static data */
1865     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1866 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1867       PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1868       for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1869         for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
1870           invMass[ii]  = ctx->m_0/ctx->masses[ii];
1871           nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii];
1872           nu_beta[ii]  = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
1873         }
1874       }
1875       if (ctx->deviceType == LANDAU_CUDA) {
1876 #if defined(PETSC_HAVE_CUDA)
1877         PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1878                                         nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d));
1879 #else
1880         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built");
1881 #endif
1882       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1883 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1884         PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset,
1885                                           nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d));
1886 #else
1887         SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built");
1888 #endif
1889       }
1890 #endif
1891       /* free */
1892       PetscCall(PetscFree4(ww,xx,yy,invJ_a));
1893       if (dim==3) PetscCall(PetscFree(zz));
1894     } else { /* CPU version, just copy in, only use part */
1895       ctx->SData_d.w = (void*)ww;
1896       ctx->SData_d.x = (void*)xx;
1897       ctx->SData_d.y = (void*)yy;
1898       ctx->SData_d.z = (void*)zz;
1899       ctx->SData_d.invJ = (void*)invJ_a;
1900     }
1901     PetscCall(PetscLogEventEnd(ctx->events[7],0,0,0,0));
1902   } // initialize
1903   PetscFunctionReturn(0);
1904 }
1905 
1906 /* < v, u > */
1907 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1908                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1909                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1910                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1911 {
1912   g0[0] = 1.;
1913 }
1914 
1915 /* 'fake' < v, u > integrand: returns a distinct tiny value on each call so the assembled matrix gets a full structural nonzero pattern */
1916 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1917                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1918                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1919                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1920 {
1921   static double ttt = 1e-12;
1922   g0[0] = ttt++;
1923 }
1924 
1925 /* < v, u > with the cylindrical coordinate weight 2*pi*r */
1926 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1927                  const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1928                  const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1929                  PetscReal t, PetscReal u_tShift, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1930 {
1931   g0[0] = 2.*PETSC_PI*x[0];
1932 }
1933 
1934 static PetscErrorCode MatrixNfDestroy(void *ptr)
1935 {
1936   PetscInt *nf = (PetscInt *)ptr;
1937   PetscFunctionBegin;
1938   PetscCall(PetscFree(nf));
1939   PetscFunctionReturn(0);
1940 }
1941 
1942 /*
1943  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep sparse.
1944   - Similar to DMPlexLandauCreateMassMatrix; one of the two should be removed and the code combined.
1945   - has legacy support for field major ordering
1946  */
1947 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1948 {
1949   PetscInt       *idxs=NULL;
1950   Mat            subM[LANDAU_MAX_GRIDS];
1951 
1952   PetscFunctionBegin;
1953   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1954     PetscFunctionReturn(0);
1955   }
1956   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
1957   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1958     PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs));
1959   }
1960   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
1961     const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
1962     Mat             gMat;
1963     DM              massDM;
1964     PetscDS         prob;
1965     Vec             tvec;
1966     // get "mass" matrix for reordering
1967     PetscCall(DMClone(ctx->plex[grid], &massDM));
1968     PetscCall(DMCopyFields(ctx->plex[grid], massDM));
1969     PetscCall(DMCreateDS(massDM));
1970     PetscCall(DMGetDS(massDM, &prob));
1971     for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
1972       PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL));
1973     }
1974     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1975     PetscCall(DMCreateMatrix(massDM, &gMat));
1976     PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
1977     PetscCall(MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
1978     PetscCall(MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
1979     PetscCall(DMCreateLocalVector(ctx->plex[grid],&tvec));
1980     PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx));
1981     PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view"));
1982     PetscCall(DMDestroy(&massDM));
1983     PetscCall(VecDestroy(&tvec));
1984     subM[grid] = gMat;
1985     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1986       MatOrderingType rtype = MATORDERINGRCM;
1987       IS              isrow,isicol;
1988       PetscCall(MatGetOrdering(gMat,rtype,&isrow,&isicol));
1989       PetscCall(ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]));
1990       PetscCall(ISGetIndices(isrow, &values));
1991       for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid
1992 #if !defined(LANDAU_SPECIES_MAJOR)
1993         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N;
1994         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
1995 #else
1996         PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n;
1997         for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0;
1998 #endif
1999       }
2000       PetscCall(ISRestoreIndices(isrow, &values));
2001       PetscCall(ISDestroy(&isrow));
2002       PetscCall(ISDestroy(&isicol));
2003     }
2004   }
2005   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2006     PetscCall(ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is));
2007   }
2008   // get a block matrix
2009   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2010     Mat               B = subM[grid];
2011     PetscInt          nloc, nzl, *colbuf, row, COL_BF_SIZE=1024;
2012     PetscCall(PetscMalloc(sizeof(*colbuf)*COL_BF_SIZE, &colbuf));
2013     PetscCall(MatGetSize(B, &nloc, NULL));
2014     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2015       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2016       const PetscInt    *cols;
2017       const PetscScalar *vals;
2018       for (int i=0 ; i<nloc ; i++) {
2019         PetscCall(MatGetRow(B,i,&nzl,NULL,NULL));
2020         if (nzl>COL_BF_SIZE) {
2021           PetscCall(PetscFree(colbuf));
2022           PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n",COL_BF_SIZE,nzl,nzl));
2023           COL_BF_SIZE = nzl;
2024           PetscCall(PetscMalloc(sizeof(*colbuf)*COL_BF_SIZE, &colbuf));
2025         }
2026         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2027         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2028         row = i + moffset;
2029         PetscCall(MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2030         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2031       }
2032     }
2033     PetscCall(PetscFree(colbuf));
2034   }
2035   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2036     PetscCall(MatDestroy(&subM[grid]));
2037   }
2038   PetscCall(MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY));
2039   PetscCall(MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY));
2040 
2041   // debug
2042   PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view"));
2043   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2044     Mat mat_block_order;
2045     PetscCall(MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order)); // use MatPermute
2046     PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view"));
2047     PetscCall(MatDestroy(&mat_block_order));
2048     PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch));
2049     PetscCall(VecDuplicate(X,&ctx->work_vec));
2050   }
2051 
2052   PetscFunctionReturn(0);
2053 }
2054 
2055 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2056 /*@C
2057  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh
2058 
2059  Collective on comm
2060 
2061  Input Parameters:
2062  +   comm  - The MPI communicator
2063  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2064  -   prefix - prefix for options (not tested)
2065 
2066  Output Parameters:
2067 +   pack  - The DM object representing the mesh
2068 .   X - A vector (user destroys)
2069 -   J - Optional matrix (object destroys)
2070 
2071  Level: beginner
2072 
2073  .keywords: mesh
2074  .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
2075  @*/
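/* Usage sketch (added, not part of the manual page): a minimal driver sequence in the style of
   the Landau examples; the variable names and the communicator are illustrative, and options
   setup and PetscInitialize/Finalize are elided.

     DM   pack;
     Vec  X;
     Mat  J;
     TS   ts;
     PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack));
     PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
     PetscCall(TSSetDM(ts, pack));
     PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
     PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
     PetscCall(TSSetFromOptions(ts));
     PetscCall(TSSolve(ts, X));
     PetscCall(TSDestroy(&ts));
     PetscCall(VecDestroy(&X));                            // user destroys X (see above)
     PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));   // destroys J and the Landau context
*/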
2076 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2077 {
2078   LandauCtx      *ctx;
2079   Vec            Xsub[LANDAU_MAX_GRIDS];
2080   IS             grid_batch_is_inv[LANDAU_MAX_GRIDS];
2081 
2082   PetscFunctionBegin;
2083   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
2084   PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM);
2085   PetscCall(PetscNew(&ctx));
2086   ctx->comm = comm; /* used for diagnostics and global errors */
2087   /* process options */
2088   PetscCall(ProcessOptions(ctx,prefix));
2089   if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
2090   /* Create Mesh */
2091   PetscCall(DMCompositeCreate(PETSC_COMM_SELF,pack));
2092   PetscCall(PetscLogEventBegin(ctx->events[13],0,0,0,0));
2093   PetscCall(PetscLogEventBegin(ctx->events[15],0,0,0,0));
2094   PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR)
2095   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2096     /* create FEM */
2097     PetscCall(SetupDS(ctx->plex[grid],dim,grid,ctx));
2098     /* set initial state */
2099     PetscCall(DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]));
2100     PetscCall(PetscObjectSetName((PetscObject) Xsub[grid], "u_orig"));
2101     /* initial static refinement, no solve */
2102     PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx));
2103     /* forest refinement - forest goes in (if forest), plex comes out */
2104     if (ctx->use_p4est) {
2105       DM plex;
2106       PetscCall(adapt(grid,ctx,&Xsub[grid])); // forest goes in, plex comes out
2107       PetscCall(DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view")); // need to differentiate - todo
2108       PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view"));
2109       // convert to plex, all done with this level
2110       PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex));
2111       PetscCall(DMDestroy(&ctx->plex[grid]));
2112       ctx->plex[grid] = plex;
2113     }
2114 #if !defined(LANDAU_SPECIES_MAJOR)
2115     PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2116 #else
2117     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2118       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2119     }
2120 #endif
2121     PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx));
2122   }
2123 #if !defined(LANDAU_SPECIES_MAJOR)
2124   // stack the remaining batched DMs (b_id=0 was added in the grid loop above)
2125   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2126     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2127       PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid]));
2128     }
2129   }
2130 #endif
2131   // create ctx->mat_offset
2132   ctx->mat_offset[0] = 0;
2133   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2134     PetscInt    n;
2135     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2136     ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
2137   }
2138   // create DM & Jacobian
2139   PetscCall(DMSetApplicationContext(*pack, ctx));
2140   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2141   PetscCall(DMCreateMatrix(*pack, &ctx->J));
2142   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2143   PetscCall(MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2144   PetscCall(MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2145   PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac"));
2146   // construct initial conditions in X
2147   PetscCall(DMCreateGlobalVector(*pack,X));
2148   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2149     PetscInt n;
2150     PetscCall(VecGetLocalSize(Xsub[grid],&n));
2151     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2152       PetscScalar const *values;
2153       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2154       PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx));
2155       PetscCall(VecGetArrayRead(Xsub[grid],&values));
2156       for (int i=0, idx = moffset; i<n; i++, idx++) {
2157         PetscCall(VecSetValue(*X,idx,values[i],INSERT_VALUES));
2158       }
2159       PetscCall(VecRestoreArrayRead(Xsub[grid],&values));
2160     }
2161   }
2162   // cleanup
2163   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2164     PetscCall(VecDestroy(&Xsub[grid]));
2165   }
2166   /* check for correct matrix type */
2167   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2168     PetscBool flg;
2169     if (ctx->deviceType == LANDAU_CUDA) {
2170       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,""));
2171       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
2172     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2173       PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,""));
2174 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2175       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2176 #else
2177       PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
2178 #endif
2179     }
2180   }
2181   PetscCall(PetscLogEventEnd(ctx->events[15],0,0,0,0));
2182   // create field major ordering
2183 
2184   ctx->work_vec   = NULL;
2185   ctx->plex_batch = NULL;
2186   ctx->batch_is   = NULL;
2187   for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
2188   PetscCall(PetscLogEventBegin(ctx->events[12],0,0,0,0));
2189   PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx));
2190   PetscCall(PetscLogEventEnd(ctx->events[12],0,0,0,0));
2191 
2192   // create AMR GPU assembly maps and static GPU data
2193   PetscCall(CreateStaticGPUData(dim,grid_batch_is_inv,ctx));
2194 
2195   PetscCall(PetscLogEventEnd(ctx->events[13],0,0,0,0));
2196 
2197   // create mass matrix
2198   PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL));
2199 
2200   if (J) *J = ctx->J;
2201 
2202   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2203     PetscContainer container;
2204     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2205     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2206     PetscCall(PetscContainerSetPointer(container, (void *)ctx));
2207     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container));
2208     PetscCall(PetscContainerDestroy(&container));
2209     // batch solvers need this map -- open question whether batch solvers can work with this ordering
2210     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2211     PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch));
2212     PetscCall(PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container));
2213     PetscCall(PetscContainerDestroy(&container));
2214   }
2215   // for batch solvers
2216   {
2217     PetscContainer  container;
2218     PetscInt        *pNf;
2219     PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container));
2220     PetscCall(PetscMalloc1(1, &pNf));
2221     *pNf = ctx->batch_sz;
2222     PetscCall(PetscContainerSetPointer(container, (void *)pNf));
2223     PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy));
2224     PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container));
2225     PetscCall(PetscContainerDestroy(&container));
2226   }
2227 
2228   PetscFunctionReturn(0);
2229 }
2230 
2231 /*@
2232  DMPlexLandauAddToFunction - Add to the distribution function with user callback
2233 
2234  Collective on dm
2235 
2236  Input Parameters:
2237 +   pack - the DMComposite
2238 .   func - callback function that adds data for one species on one grid
2239 -   user_ctx - user context passed through to func
2240 
2241  Input/Output Parameter:
2242 .   X - Vector to add data to
2243 
2244  Level: advanced
2245 
2246  .keywords: mesh
2247  .seealso: `DMPlexLandauCreateVelocitySpace()`
2248  @*/
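/* Usage sketch (added): the user callback receives the per-species scratch DM and vector plus the
   local field index, grid index, and batch index, matching the
   PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void*) signature below; the names
   'MyAddSource' and 'myctx' are illustrative.

     static PetscErrorCode MyAddSource(DM dm, Vec vec, PetscInt field, PetscInt grid, PetscInt b_id, void *vctx)
     {
       PetscFunctionBegin;
       // fill 'vec' (already zeroed by the caller) with this species/grid/batch contribution
       PetscFunctionReturn(0);
     }
     ...
     PetscCall(DMPlexLandauAddToFunction(pack, X, MyAddSource, &myctx));
*/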
2249 PetscErrorCode DMPlexLandauAddToFunction(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void*), void* user_ctx)
2250 {
2251   LandauCtx      *ctx;
2252   PetscFunctionBegin;
2253   PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2254   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2255     PetscInt dim,n, Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid];
2256     Vec      vec;
2257     DM       dm;
2258     PetscFE  fe;
2259     PetscCall(DMGetDimension(pack, &dim));
2260     PetscCall(DMClone(ctx->plex[grid], &dm));
2261     PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe));
2262     PetscCall(PetscObjectSetName((PetscObject) fe, "species"));
2263     PetscCall(DMSetField(dm, 0, NULL, (PetscObject)fe));
2264     PetscCall(PetscFEDestroy(&fe));
2265     PetscCall(DMSetApplicationContext(dm, ctx)); // does the user want this?
2266     PetscCall(DMCreateGlobalVector(dm,&vec));
2267     PetscCall(VecGetSize(vec,&n));
2268     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2269       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2270       for (PetscInt sp=ctx->species_offset[grid],i0=0;sp<ctx->species_offset[grid+1];sp++,i0++) {
2271         PetscCall(VecZeroEntries(vec));
2272         /* Add your data with 'dm' for species 'sp' to 'vec' */
2273         PetscCall(func(dm,vec,i0,grid,b_id,user_ctx));
2274         /* add to global */
2275         PetscScalar const *values;
2276         PetscCall(VecGetArrayRead(vec,&values));
2277         for (int i=0, idx = moffset + i0; i<n; i++, idx += Nf) { // works for Q1 & Q2 only -- TODO
2278           PetscCall(VecSetValue(X,idx,values[i],ADD_VALUES));
2279         }
2280         PetscCall(VecRestoreArrayRead(vec,&values));
2281       }
2282     } // batch
2283     PetscCall(VecDestroy(&vec));
2284     PetscCall(DMDestroy(&dm));
2285   } // grid
2286   PetscFunctionReturn(0);
2287 }
2288 
2289 /*@
2290  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
2291 
2292  Collective on dm
2293 
2294  Input/Output Parameters:
2295  .   dm - the dm to destroy
2296 
2297  Level: beginner
2298 
2299  .keywords: mesh
2300  .seealso: `DMPlexLandauCreateVelocitySpace()`
2301  @*/
2302 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2303 {
2304   LandauCtx      *ctx;
2305   PetscFunctionBegin;
2306   PetscCall(DMGetApplicationContext(*dm, &ctx));
2307   PetscCall(MatDestroy(&ctx->M));
2308   PetscCall(MatDestroy(&ctx->J));
2309   for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii]));
2310   PetscCall(ISDestroy(&ctx->batch_is));
2311   PetscCall(VecDestroy(&ctx->work_vec));
2312   PetscCall(VecScatterDestroy(&ctx->plex_batch));
2313   if (ctx->deviceType == LANDAU_CUDA) {
2314 #if defined(PETSC_HAVE_CUDA)
2315     PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d));
2316 #else
2317     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
2318 #endif
2319   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2320 #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2321     PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d));
2322 #else
2323     SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
2324 #endif
2325   } else {
2326     if (ctx->SData_d.x) { /* in a CPU run */
2327       PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
2328       LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets;
2329       PetscCall(PetscFree4(ww,xx,yy,invJ));
2330       if (zz) PetscCall(PetscFree(zz));
2331       if (coo_elem_offsets) {
2332         PetscCall(PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets)); // could be NULL
2333       }
2334     }
2335   }
2336 
2337   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2338     PetscCall(PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]));
2339     PetscCall(PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz));
2340     PetscCall(PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]));
2341     PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]));
2342     PetscCall(PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n",ctx->times[LANDAU_OPERATOR]));
2343     PetscCall(PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n",ctx->times[LANDAU_MASS]));
2344     PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n",ctx->times[LANDAU_F_DF]));
2345     PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n",ctx->times[LANDAU_KERNEL]));
2346     PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]));
2347     PetscCall(PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]));
2348   }
2349   for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
2350     PetscCall(DMDestroy(&ctx->plex[grid]));
2351   }
2352   PetscCall(PetscFree(ctx));
2353   PetscCall(DMDestroy(dm));
2354   PetscFunctionReturn(0);
2355 }
2356 
2357 /* < v, u > (density) */
2358 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2359                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2360                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2361                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2362 {
2363   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2364   f0[0] = u[ii];
2365 }
2366 
2367 /* < v, x_j u > (momentum) */
2368 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2369                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2370                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2371                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2372 {
2373   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2374   f0[0] = x[jj]*u[ii]; /* x momentum */
2375 }
2376 
2377 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2378                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2379                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2380                     PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2381 {
2382   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2383   double tmp1 = 0.;
2384   for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2385   f0[0] = tmp1*u[ii];
2386 }
2387 
2388 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2389 {
2390   const PetscReal *c2_0_arr = ((PetscReal*)actx);
2391   const PetscReal c02 = c2_0_arr[0];
2392 
2393   PetscFunctionBegin;
2394   for (int s = 0 ; s < Nf ; s++) {
2395     PetscReal tmp1 = 0.;
2396     for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
2397 #if defined(PETSC_USE_DEBUG)
2398     u[s] = PetscSqrtReal(1. + tmp1/c02);
2399 #else
2400     {
2401       PetscReal xx = tmp1/c02;
2402       u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned for small xx
2403     }
2404 #endif
2405   }
2406   PetscFunctionReturn(0);
2407 }
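/* Note (added): the optimized branch above uses the algebraic identity
     sqrt(1 + x) - 1 = x / (sqrt(1 + x) + 1),
   which avoids the cancellation that sqrt(1 + x) - 1 suffers for small x = (u/c)^2;
   the debug branch evaluates sqrt(1 + x) directly. */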
2408 
2409 /* < v, ru > */
2410 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2411                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2412                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2413                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2414 {
2415   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2416   f0[0] = 2.*PETSC_PI*x[0]*u[ii];
2417 }
2418 
2419 /* < v, r z u > (z-momentum, cylindrical) */
2420 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2421                       const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2422                       const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2423                       PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2424 {
2425   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2426   f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
2427 }
2428 
2429 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2430                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2431                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2432                      PetscReal t, const PetscReal x[],  PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2433 {
2434   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2435   f0[0] =  2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
2436 }
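/* Note (added): the f0_s_r* functionals are the cylindrical-coordinate moments used by
   DMPlexLandauPrintNorms in 2D, where the integrals carry the 2*pi*r volume element:
     density    ~ \int f           2*pi*r dr dz
     z-momentum ~ \int z f         2*pi*r dr dz
     energy     ~ (1/2) \int v^2 f 2*pi*r dr dz
   The dimensional factors (n_0, v_0, masses, charges) are applied by the caller. */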
2437 
2438 /*@
2439  DMPlexLandauPrintNorms - collects moments and prints them
2440 
2441  Collective on dm
2442 
2443  Input Parameters:
2444  +   X  - the state
2445  -   stepi - current step to print
2446 
2447  Level: beginner
2448 
2449  .keywords: mesh
2450  .seealso: `DMPlexLandauCreateVelocitySpace()`
2451  @*/
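/* Usage sketch (added): typically called on the solution vector after a step, e.g. from a TS
   post-step hook; 'stepi' is only used to label the output.

     PetscInt step;
     PetscCall(TSGetStepNumber(ts, &step));
     PetscCall(DMPlexLandauPrintNorms(X, step));
*/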
2452 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2453 {
2454   LandauCtx      *ctx;
2455   PetscDS        prob;
2456   DM             pack;
2457   PetscInt       cStart, cEnd, dim, ii, i0, nDMs;
2458   PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
2459   PetscScalar    xmomentum[LANDAU_MAX_SPECIES],  ymomentum[LANDAU_MAX_SPECIES],  zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2460   Vec            *globXArray;
2461 
2462   PetscFunctionBegin;
2463   PetscCall(VecGetDM(X, &pack));
2464   PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
2465   PetscCall(DMGetDimension(pack, &dim));
2466   PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim);
2467   PetscCall(DMGetApplicationContext(pack, &ctx));
2468   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2469   /* print momentum and energy */
2470   PetscCall(DMCompositeGetNumberDM(pack,&nDMs));
2471   PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz);
2472   PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
2473   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
2474   for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
2475     Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2476     PetscCall(DMGetDS(ctx->plex[grid], &prob));
2477     for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
2478       PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2479       PetscCall(PetscDSSetConstants(prob, 2, user));
2480       if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
2481         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden));
2482         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2483         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2484         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom));
2485         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2486         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2487         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2));
2488         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2489         energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2490         zmomentumtot += zmomentum[ii];
2491         energytot  += energy[ii];
2492         densitytot += density[ii];
2493         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii])));
2494       } else { /* 2/3Xloc + 3V */
2495         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den));
2496         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2497         density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
2498         PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom));
2499         user[1] = 0;
2500         PetscCall(PetscDSSetConstants(prob, 2, user));
2501         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2502         xmomentum[ii]  = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2503         user[1] = 1;
2504         PetscCall(PetscDSSetConstants(prob, 2, user));
2505         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2506         ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2507         user[1] = 2;
2508         PetscCall(PetscDSSetConstants(prob, 2, user));
2509         PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2510         zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
2511         if (ctx->use_relativistic_corrections) {
2512           /* gamma * M * f */
2513           if (ii==0 && grid==0) { // do all at once
2514             Vec            Mf, globGamma, *globMfArray, *globGammaArray;
2515             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2516             PetscReal      *c2_0[1], data[1];
2517 
2518             PetscCall(VecDuplicate(X,&globGamma));
2519             PetscCall(VecDuplicate(X,&Mf));
2520             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray));
2521             PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray));
2522             /* M * f */
2523             PetscCall(MatMult(ctx->M,X,Mf));
2524             /* gamma */
2525             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2526             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching
2527               Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
2528               data[0] = PetscSqr(C_0(ctx->v_0));
2529               c2_0[0] = &data[0];
2530               PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1));
2531             }
2532             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2533             /* gamma * Mf */
2534             PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2535             PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2536             for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice
2537               PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2538               Vec      Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
2539               // get each component
2540               PetscCall(VecGetSize(Mfsub,&N));
2541               PetscCall(VecCreate(ctx->comm,&v1));
2542               PetscCall(VecSetSizes(v1,PETSC_DECIDE,N/Nf));
2543               PetscCall(VecCreate(ctx->comm,&v2));
2544               PetscCall(VecSetSizes(v2,PETSC_DECIDE,N/Nf));
2545               PetscCall(VecSetFromOptions(v1)); // ???
2546               PetscCall(VecSetFromOptions(v2));
2547               // get each component
2548               PetscCall(VecGetBlockSize(Gsub,&bs));
2549               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf);
2550               PetscCall(VecGetBlockSize(Mfsub,&bs));
2551               PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf);
2552               for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2553                 PetscScalar val;
2554                 PetscCall(VecStrideGather(Gsub,i,v1,INSERT_VALUES));  // this is not right -- TODO
2555                 PetscCall(VecStrideGather(Mfsub,i,v2,INSERT_VALUES));
2556                 PetscCall(VecDot(v1,v2,&val));
2557                 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2558               }
2559               PetscCall(VecDestroy(&v1));
2560               PetscCall(VecDestroy(&v2));
2561             } /* grids */
2562             PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray));
2563             PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray));
2564             PetscCall(PetscFree(globGammaArray));
2565             PetscCall(PetscFree(globMfArray));
2566             PetscCall(VecDestroy(&globGamma));
2567             PetscCall(VecDestroy(&Mf));
2568           }
2569         } else {
2570           PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2));
2571           PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx));
2572           energy[ii]    = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2573         }
2574         PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(xmomentum[ii]),(double)PetscRealPart(ymomentum[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii])));
2575         xmomentumtot += xmomentum[ii];
2576         ymomentumtot += ymomentum[ii];
2577         zmomentumtot += zmomentum[ii];
2578         energytot    += energy[ii];
2579         densitytot   += density[ii];
2580       }
2581       if (ctx->num_species>1) PetscCall(PetscPrintf(ctx->comm, "\n"));
2582     }
2583   }
2584   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
2585   PetscCall(PetscFree(globXArray));
2586   /* totals */
2587   PetscCall(DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd));
2588   if (ctx->num_species>1) {
2589     if (dim==2) {
2590       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2591     } else {
2592       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart));
2593     }
2594   } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart));
2595   PetscCall(PetscPrintf(ctx->comm,"\n"));
2596   PetscFunctionReturn(0);
2597 }
2598 
2599 /*@
2600  DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian)
2601   - puts mass matrix into ctx->M
2602 
2603  Collective on pack
2604 
2605  Input/Output Parameter:
2606 . pack     - the DM object; the mass matrix is put in the Landau context M field
2607 
2608  Output Parameter:
2609 . Amat - The mass matrix (optional); the matrix is also stored in the Landau context (ctx->M)
2610 
2611  Level: beginner
2612 
2613  .keywords: mesh
2614  .seealso: `DMPlexLandauCreateVelocitySpace()`
2615  @*/
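/* Usage sketch (added): this is normally called for you by DMPlexLandauCreateVelocitySpace()
   (with Amat = NULL), storing the result in the Landau context; calling it directly and keeping
   the handle is also possible. The returned matrix is owned by the context and is destroyed by
   DMPlexLandauDestroyVelocitySpace().

     Mat M;
     PetscCall(DMPlexLandauCreateMassMatrix(pack, &M));   // M is the same object as ctx->M
*/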
2616 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2617 {
2618   DM             mass_pack,massDM[LANDAU_MAX_GRIDS];
2619   PetscDS        prob;
2620   PetscInt       ii,dim,N1=1,N2;
2621   LandauCtx      *ctx;
2622   Mat            packM,subM[LANDAU_MAX_GRIDS];
2623 
2624   PetscFunctionBegin;
2625   PetscValidHeaderSpecific(pack,DM_CLASSID,1);
2626   if (Amat) PetscValidPointer(Amat,2);
2627   PetscCall(DMGetApplicationContext(pack, &ctx));
2628   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2629   PetscCall(PetscLogEventBegin(ctx->events[14],0,0,0,0));
2630   PetscCall(DMGetDimension(pack, &dim));
2631   PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack));
2632   /* create pack mass matrix */
2633   for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2634     PetscCall(DMClone(ctx->plex[grid], &massDM[grid]));
2635     PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid]));
2636     PetscCall(DMCreateDS(massDM[grid]));
2637     PetscCall(DMGetDS(massDM[grid], &prob));
2638     for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2639       if (dim==3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL));
2640       else        PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL));
2641     }
2642 #if !defined(LANDAU_SPECIES_MAJOR)
2643     PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2644 #else
2645     for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
2646       PetscCall(DMCompositeAddDM(mass_pack,massDM[grid]));
2647     }
2648 #endif
2649     PetscCall(DMCreateMatrix(massDM[grid], &subM[grid]));
2650   }
2651 #if !defined(LANDAU_SPECIES_MAJOR)
2652   // stack the batched DMs
2653   for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
2654     for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2655       PetscCall(DMCompositeAddDM(mass_pack, massDM[grid]));
2656     }
2657   }
2658 #endif
2659   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only"));
2660   PetscCall(DMCreateMatrix(mass_pack, &packM));
2661   PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false"));
2662   PetscCall(MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE));
2663   PetscCall(MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE));
2664   PetscCall(DMDestroy(&mass_pack));
2665   /* make mass matrix for each block */
2666   for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2667     Vec locX;
2668     DM  plex = massDM[grid];
2669     PetscCall(DMGetLocalVector(plex, &locX));
2670     /* Mass matrix is independent of the input, so no need to fill locX */
2671     PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx));
2672     PetscCall(DMRestoreLocalVector(plex, &locX));
2673     PetscCall(DMDestroy(&massDM[grid]));
2674   }
2675   PetscCall(MatGetSize(ctx->J, &N1, NULL));
2676   PetscCall(MatGetSize(packM, &N2, NULL));
2677   PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2);
2678   /* assemble block diagonals */
2679   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2680     Mat               B = subM[grid];
2681     PetscInt          nloc, nzl, *colbuf, COL_BF_SIZE=1024, row;
2682     PetscCall(PetscMalloc(sizeof(*colbuf)*COL_BF_SIZE, &colbuf));
2683     PetscCall(MatGetSize(B, &nloc, NULL));
2684     for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
2685       const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
2686       const PetscInt    *cols;
2687       const PetscScalar *vals;
2688       for (int i=0 ; i<nloc ; i++) {
2689         PetscCall(MatGetRow(B,i,&nzl,NULL,NULL));
2690         if (nzl>COL_BF_SIZE) {
2691           PetscCall(PetscFree(colbuf));
2692           PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n",COL_BF_SIZE,nzl,nzl));
2693           COL_BF_SIZE = nzl;
2694           PetscCall(PetscMalloc(sizeof(*colbuf)*COL_BF_SIZE, &colbuf));
2695         }
2696         PetscCall(MatGetRow(B,i,&nzl,&cols,&vals));
2697         for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
2698         row = i + moffset;
2699         PetscCall(MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES));
2700         PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals));
2701       }
2702     }
2703     PetscCall(PetscFree(colbuf));
2704   }
2705   // cleanup
2706   for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2707     PetscCall(MatDestroy(&subM[grid]));
2708   }
2709   PetscCall(MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY));
2710   PetscCall(MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY));
2711   PetscCall(PetscObjectSetName((PetscObject)packM, "mass"));
2712   PetscCall(MatViewFromOptions(packM,NULL,"-dm_landau_mass_view"));
2713   ctx->M = packM;
2714   if (Amat) *Amat = packM;
2715   PetscCall(PetscLogEventEnd(ctx->events[14],0,0,0,0));
2716   PetscFunctionReturn(0);
2717 }
2718 
2719 /*@
2720  DMPlexLandauIFunction - TS residual calculation; confusingly, this also forms the collision Jacobian (without the mass term)
2721 
2722  Collective on ts
2723 
2724  Input Parameters:
2725 +   ts  - The time stepping context
2726 .   time_dummy - current time (not used)
2727 .   X - Current state
2728 .   X_t - Time derivative of current state
2729 -   actx - Landau context
2730 
2731  Output Parameter:
2732 .   F  - The residual
2733 
2734  Level: beginner
2735 
2736  .keywords: mesh
2737  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2738  @*/
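/* Note (added): the residual assembled below is F(X, X_t) = C(X) X + M X_t, where C(X) is the
   state-dependent collision operator kept in ctx->J and M is the mass matrix in ctx->M; C(X) is
   (re)built here only when the object state of ctx->J indicates it is out of date. */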
2739 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2740 {
2741   LandauCtx        *ctx=(LandauCtx*)actx;
2742   PetscInt         dim;
2743   DM               pack;
2744 #if defined(PETSC_HAVE_THREADSAFETY)
2745   double           starttime, endtime;
2746 #endif
2747   PetscObjectState state;
2748 
2749   PetscFunctionBegin;
2750   PetscCall(TSGetDM(ts,&pack));
2751   PetscCall(DMGetApplicationContext(pack, &ctx));
2752   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2753   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2754   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2755   PetscCall(PetscLogEventBegin(ctx->events[0],0,0,0,0));
2756 #if defined(PETSC_HAVE_THREADSAFETY)
2757   starttime = MPI_Wtime();
2758 #endif
2759   PetscCall(DMGetDimension(pack, &dim));
2760   PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2761   if (state != ctx->norm_state) {
2762     PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n",(double)time_dummy, ctx->norm_state, state));
2763     PetscCall(MatZeroEntries(ctx->J));
2764     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx));
2765     PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view"));
2766     PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2767     ctx->norm_state = state;
2768   } else {
2769     PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n",state));
2770   }
2771   /* mat vec for op */
2772   PetscCall(MatMult(ctx->J,X,F)); /* C*f */
2773   /* add time term */
2774   if (X_t) PetscCall(MatMultAdd(ctx->M,X_t,F,F));
2775 #if defined(PETSC_HAVE_THREADSAFETY)
2776   if (ctx->stage) {
2777     endtime = MPI_Wtime();
2778     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2779     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2780     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2781   }
2782 #endif
2783   PetscCall(PetscLogEventEnd(ctx->events[0],0,0,0,0));
2784   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2785   if (ctx->stage) {
2786     PetscCall(PetscLogStagePop());
2787 #if defined(PETSC_HAVE_THREADSAFETY)
2788     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2789 #endif
2790   }
2791   PetscFunctionReturn(0);
2792 }
2793 
2794 /*@
2795  DMPlexLandauIJacobian - TS Jacobian construction; confusingly, this only adds the shifted mass term (the collision Jacobian is formed in DMPlexLandauIFunction)
2796 
2797  Collective on ts
2798 
2799  Input Parameters:
2800 +   ts  - The time stepping context
2801 .   time_dummy - current time (not used)
2802 .   X - Current state
2803 .   U_tdummy - Time derivative of current state (not used)
2804 .   shift - shift for du/dt term
2805 -   actx - Landau context
2806 
2807  Output Parameters:
2808 +   Amat  - Jacobian
2809 -   Pmat  - same as Amat
2810 
2811  Level: beginner
2812 
2813  .keywords: mesh
2814  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2815  @*/
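/* Note (added): the matrix produced here is ctx->J, already holding C(X) from DMPlexLandauIFunction(),
   to which shift*M is added: either through LandauFormJacobian_Internal() with the given shift, or,
   when ctx->use_matrix_mass is set, via MatAXPY() with the assembled mass matrix ctx->M. */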
2816 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2817 {
2818   LandauCtx        *ctx=NULL;
2819   PetscInt         dim;
2820   DM               pack;
2821 #if defined(PETSC_HAVE_THREADSAFETY)
2822   double           starttime, endtime;
2823 #endif
2824   PetscObjectState state;
2825 
2826   PetscFunctionBegin;
2827   PetscCall(TSGetDM(ts,&pack));
2828   PetscCall(DMGetApplicationContext(pack, &ctx));
2829   PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2830   PetscCheck(Amat == Pmat && Amat == ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2831   PetscCall(DMGetDimension(pack, &dim));
2832   /* get collision Jacobian into A */
2833   if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage));
2834   PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0));
2835   PetscCall(PetscLogEventBegin(ctx->events[9],0,0,0,0));
2836 #if defined(PETSC_HAVE_THREADSAFETY)
2837   starttime = MPI_Wtime();
2838 #endif
2839   PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift));
2840   PetscCheck(shift!=0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift");
2841   PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state));
2842   PetscCheck(state == ctx->norm_state,ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "",ctx->norm_state,state);
2843   if (!ctx->use_matrix_mass) {
2844     PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx));
2845   } else { /* add mass */
2846     PetscCall(MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN));
2847   }
2848 #if defined(PETSC_HAVE_THREADSAFETY)
2849   if (ctx->stage) {
2850     endtime = MPI_Wtime();
2851     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2852     ctx->times[LANDAU_MASS] += (endtime - starttime);
2853   }
2854 #endif
2855   PetscCall(PetscLogEventEnd(ctx->events[9],0,0,0,0));
2856   PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0));
2857   if (ctx->stage) {
2858     PetscCall(PetscLogStagePop());
2859 #if defined(PETSC_HAVE_THREADSAFETY)
2860     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2861 #endif
2862   }
2863   PetscFunctionReturn(0);
2864 }
2865