xref: /petsc/src/ts/tests/ex30.c (revision a370cb8ab0ec859999cdbf41e4e4937db7025e2e)
1 static char help[] = "Grid-based Landau collision operator with a PIC interface and OpenMP setup (one species per grid).\n";
2 
3 /*
4    Supports 2.5V with axisymmetric coordinates
5      - (r,z) coordinates
6      - Domain and species data are provided by the Landau operator
7      - a "radius" for each grid, normalized by the electron thermal velocity
8      - Domain: (0,radius) x (-radius,radius), so the first coordinate x[0] is the perpendicular velocity and a 2*pi*x[0] factor is added for the axisymmetric volume element
9    Also supports full 3V
10 
11  */
12 
13 #include <petscdmplex.h>
14 #include <petscds.h>
15 #include <petscdmswarm.h>
16 #include <petscksp.h>
17 #include <petsc/private/petscimpl.h>
18 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
19   #include <omp.h>
20 #endif
21 #include <petsclandau.h>
22 #include <petscdmcomposite.h>
23 
24 typedef struct {
25   Mat MpTrans;
26   Mat Mp;
27   Vec ff;
28   Vec uu;
29 } MatShellCtx;
30 
31 typedef struct {
32   PetscInt   v_target;
33   PetscInt   g_target;
34   PetscInt   global_vertex_id_0;
35   DM        *globSwarmArray;
36   LandauCtx *ctx;
37   DM        *grid_dm;
38   Mat       *g_Mass;
39   Mat       *globMpArray;
40   Vec       *globXArray;
41   PetscBool  print;
42   PetscBool  print_entropy;
43 } PrintCtx;
44 
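// Shell-matrix operations for the normal-equations operator Mp' Mp: MatMultMtM_SeqAIJ computes yy = Mp' (Mp xx) and
// MatMultAddMtM_SeqAIJ computes zz = yy + Mp' (Mp xx), using the explicit transpose stored in MatShellCtx so that
// Mp' Mp is never assembled.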
45 PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
46 {
47   MatShellCtx *matshellctx;
48 
49   PetscFunctionBeginUser;
50   PetscCall(MatShellGetContext(MtM, &matshellctx));
51   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
52   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
53   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
54   PetscFunctionReturn(PETSC_SUCCESS);
55 }
56 
57 PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
58 {
59   MatShellCtx *matshellctx;
60 
61   PetscFunctionBeginUser;
62   PetscCall(MatShellGetContext(MtM, &matshellctx));
63   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
64   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
65   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
66   PetscFunctionReturn(PETSC_SUCCESS);
67 }
68 
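// Create a DMSWARM_PIC particle swarm attached to the cell DM 'dm', with a single real field "w_q" holding the particle weights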
69 PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
70 {
71   PetscInt Nc = 1;
72 
73   PetscFunctionBeginUser;
74   PetscCall(DMCreate(PETSC_COMM_SELF, sw));
75   PetscCall(DMSetType(*sw, DMSWARM));
76   PetscCall(DMSetDimension(*sw, dim));
77   PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
78   PetscCall(DMSwarmSetCellDM(*sw, dm));
79   PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_REAL));
80   PetscCall(DMSwarmFinalizeFieldRegister(*sw));
81   PetscCall(DMSetFromOptions(*sw));
82   PetscCall(PetscObjectSetName((PetscObject)*sw, "Particle Grid"));
83   PetscFunctionReturn(PETSC_SUCCESS);
84 }
85 
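// Set the local swarm size to Np and copy the particle coordinates (xx, yy[, zz]) into the swarm coordinate field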
86 static PetscErrorCode makeSwarm(DM sw, const PetscInt dim, const PetscInt Np, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[])
87 {
88   PetscReal    *coords;
89   PetscDataType dtype;
90   PetscInt      bs, p, zero = 0;
91 
92   PetscFunctionBeginUser;
93   PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
94   PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
95   for (p = 0; p < Np; p++) {
96     coords[p * dim + 0] = xx[p];
97     coords[p * dim + 1] = yy[p];
98     if (dim == 3) coords[p * dim + 2] = zz[p];
99   }
100   PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
101   PetscCall(DMSwarmVectorDefineField(sw, "w_q"));
102   PetscFunctionReturn(PETSC_SUCCESS);
103 }
104 
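// Migrate particles to their owning cells, then build the particle mass matrix M_p with DMCreateMassMatrix;
// M_p^T applied to the particle weights deposits them as a weak-form right-hand side on the grid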
105 static PetscErrorCode createMp(const DM dm, DM sw, Mat *Mp_out)
106 {
107   PetscBool removePoints = PETSC_TRUE;
108   Mat       M_p;
109 
110   PetscFunctionBeginUser;
111   // migrate after coords are set
112   PetscCall(DMSwarmMigrate(sw, removePoints));
113   //
114   PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));
115 
116   /* PetscInt  N,*count,nmin=10000,nmax=0,ntot=0; */
117   /* // count */
118   /* PetscCall(DMSwarmCreatePointPerCellCount(sw, &N, &count)); */
119   /* for (int i=0, n; i< N ; i++) { */
120   /*   if ((n=count[i]) > nmax) nmax = n; */
121   /*   if (n < nmin) nmin = n; */
122   /*   PetscCall(PetscInfo(dm, " %d) %d particles\n", i, n)); */
123   /*   ntot += n; */
124   /* } */
125   /* PetscCall(PetscFree(count)); */
126   /* PetscCall(PetscInfo(dm, " %" PetscInt_FMT " max particle / cell, and %" PetscInt_FMT " min, ratio = %g,  %" PetscInt_FMT " total\n", nmax, nmin, (double)nmax/(double)nmin,ntot)); */
127 
128   /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
129   PetscCall(DMCreateMassMatrix(sw, dm, &M_p));
130   PetscCall(DMViewFromOptions(sw, NULL, "-ex30_sw_view"));
131   // output
132   *Mp_out = M_p;
133   PetscFunctionReturn(PETSC_SUCCESS);
134 }
135 
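// Copy the particle weights a_wp into the swarm field "w_q" and deposit them on the grid: rho = M_p^T w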
136 static PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt a_tid, const PetscInt dim, const PetscReal a_wp[], Vec rho, Mat M_p)
137 {
138   PetscReal    *wq;
139   PetscDataType dtype;
140   Vec           ff;
141   PetscInt      bs, p, Np;
142 
143   PetscFunctionBeginUser;
144   PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
145   PetscCall(DMSwarmGetLocalSize(sw, &Np));
146   for (p = 0; p < Np; p++) wq[p] = a_wp[p];
147   PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
148   PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
149   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
150   PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
151   PetscCall(MatMultTranspose(M_p, ff, rho));
152   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
153   PetscFunctionReturn(PETSC_SUCCESS);
154 }
155 
156 //
157 // Project the grid solution 'rhs' onto the particles, writing the weights into the swarm field 'sw.w_q'
158 //
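// The projection is a Moore-Penrose pseudo-inverse weighted by the grid mass matrix M:
//   1) r = M u                (weak-form right-hand side from the FE coefficients)
//   2) solve (Mp' Mp) a = r   (CG/Jacobi on the normal equations; LSQR on Mp as a fallback)
//   3) f_p = Mp a             (back to particle weights, stored in 'sw.w_q')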
159 PetscErrorCode gridToParticles(const DM dm, DM sw, const Vec rhs, Vec work_ferhs, Mat M_p, Mat Mass)
160 {
161   PetscBool    is_lsqr;
162   KSP          ksp;
163   Mat          PM_p = NULL, MtM, D = NULL;
164   Vec          ff;
165   PetscInt     N, M, nzl;
166   MatShellCtx *matshellctx = NULL;
167   PC           pc;
168 
169   PetscFunctionBeginUser;
170   // 1) apply the grid mass matrix M to the input; the full Moore-Penrose operation is Mp (Mp' Mp)^-1 M
171   PetscCall(MatMult(Mass, rhs, work_ferhs));
172   // 2) pseudo-inverse, first part: (Mp' Mp)^-1
173   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
174   PetscCall(KSPSetType(ksp, KSPCG));
175   PetscCall(KSPGetPC(ksp, &pc));
176   PetscCall(PCSetType(pc, PCJACOBI));
177   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
178   PetscCall(KSPSetFromOptions(ksp));
179   PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
180   if (!is_lsqr) {
181     PetscCall(MatGetLocalSize(M_p, &M, &N));
182     if (N > M) {
183       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT "): more vertices than particles: revert to lsqr\n", M, N));
184       is_lsqr = PETSC_TRUE;
185       PetscCall(KSPSetType(ksp, KSPLSQR));
186       PetscCall(PCSetType(pc, PCNONE)); // should not happen, but could solve stable (Mp^T Mp), move projection Mp before solve
187     } else {
188       PetscCall(PetscNew(&matshellctx));
189       PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
190       if (0) {
191         PetscCall(MatTransposeMatMult(M_p, M_p, MAT_INITIAL_MATRIX, 4, &MtM));
192         PetscCall(KSPSetOperators(ksp, MtM, MtM));
193         PetscCall(PetscInfo(M_p, "createMtM KSP with explicit Mp'Mp\n"));
194         PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
195       } else {
196         PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
197         PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
198         matshellctx->Mp = M_p;
199         PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
200         PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
201         PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
202         PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpT_mat_view"));
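        // Build D = diag(Mp' Mp) (row-wise sum of squares of Mp') as the preconditioner for the CG solve on the shell
        // operator; an empty row means a grid vertex with no particle support, in which case fall back to LSQR on Mp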
203         for (PetscInt i = 0; i < N; i++) {
204           const PetscScalar *vals;
205           const PetscInt    *cols;
206           PetscScalar        dot = 0;
207           PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
208           for (PetscInt ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
209           if (dot < PETSC_MACHINE_EPSILON) {
210             PetscCall(PetscInfo(ksp, "empty row in pseudo-inverse %d\n", (int)i));
211             is_lsqr = PETSC_TRUE; // empty rows
212             PetscCall(KSPSetType(ksp, KSPLSQR));
213             PetscCall(PCSetType(pc, PCNONE)); // should not happen, but could solve stable (Mp Mp^T), move projection Mp before solve
214             // clean up
215             PetscCall(MatDestroy(&matshellctx->MpTrans));
216             PetscCall(VecDestroy(&matshellctx->ff));
217             PetscCall(VecDestroy(&matshellctx->uu));
218             PetscCall(MatDestroy(&D));
219             PetscCall(MatDestroy(&MtM));
220             PetscCall(PetscFree(matshellctx));
221             D = NULL;
222             break;
223           }
224           PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
225         }
226         if (D) {
227           PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
228           PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
229           PetscCall(PetscInfo(M_p, "createMtM KSP has %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
230           PetscCall(KSPSetOperators(ksp, MtM, D));
231           PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
232           PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
233           PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
234           PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
235         }
236       }
237     }
238   }
239   if (is_lsqr) {
240     PC        pc2;
241     PetscBool is_bjac;
242     PetscCall(KSPGetPC(ksp, &pc2));
243     PetscCall(PetscObjectTypeCompare((PetscObject)pc2, PCBJACOBI, &is_bjac));
244     if (is_bjac) {
245       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
246       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
247     } else {
248       PetscCall(KSPSetOperators(ksp, M_p, M_p));
249     }
250     PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
251   }
252   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
253   if (!is_lsqr) {
254     PetscErrorCode ierr;
255     ierr = KSPSolve(ksp, work_ferhs, matshellctx->uu);
256     if (!ierr) {
257       // 3) with Moore-Penrose apply Mp: M_p (Mp' Mp)^-1 M
258       PetscCall(MatMult(M_p, matshellctx->uu, ff));
259     } else { // failed
260       PC        pc2;
261       PetscBool is_bjac;
262       PetscCall(PetscInfo(ksp, "Solver failed, probably singular, try lsqr\n"));
263       PetscCall(KSPReset(ksp));
264       PetscCall(KSPSetType(ksp, KSPLSQR));
265       PetscCall(KSPGetPC(ksp, &pc2));
266       PetscCall(PCSetType(pc2, PCNONE)); // should not happen, but could solve stable (Mp Mp^T), move projection Mp before solve
267       PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
268       PetscCall(KSPSetFromOptions(ksp));
269       PetscCall(PetscObjectTypeCompare((PetscObject)pc2, PCBJACOBI, &is_bjac));
270       if (is_bjac) {
271         PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
272         PetscCall(KSPSetOperators(ksp, M_p, PM_p));
273       } else {
274         PetscCall(KSPSetOperators(ksp, M_p, M_p));
275       }
276       ierr = KSPSolveTranspose(ksp, work_ferhs, ff);
277       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "backup LSQR solver failed - need to add N_v > N_p Moore-Penrose pseudo-inverse");
278     }
279     if (D) PetscCall(MatDestroy(&D));
280     PetscCall(MatDestroy(&MtM));
281     if (matshellctx->MpTrans) PetscCall(MatDestroy(&matshellctx->MpTrans));
282     PetscCall(VecDestroy(&matshellctx->ff));
283     PetscCall(VecDestroy(&matshellctx->uu));
284     PetscCall(PetscFree(matshellctx));
285   } else {
286     PetscErrorCode ierr;
287     // finally with LSQR apply M_p^\dagger
288     ierr = KSPSolveTranspose(ksp, work_ferhs, ff);
289     PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "backup LSQR solver failed - need to add N_v > N_p Moore-Penrose pseudo-inverse");
290   }
291   PetscCall(KSPDestroy(&ksp));
292   PetscCall(MatDestroy(&PM_p));
293   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
294   PetscFunctionReturn(PETSC_SUCCESS);
295 }
296 
297 #define EX30_MAX_NUM_THRDS 12
298 #define EX30_MAX_BATCH_SZ  1024
299 //
300 // Map the grid solutions in globXArray[] back to particle weights in globSwarmArray[].w_q and accumulate diagnostics for the target vertex
301 //
302 PetscErrorCode gridToParticles_private(DM grid_dm[], DM globSwarmArray[], const PetscInt dim, const PetscInt v_target, const PetscInt numthreads, const PetscInt num_vertices, const PetscInt global_vertex_id, Mat globMpArray[], Mat g_Mass[], Vec t_fhat[][EX30_MAX_NUM_THRDS], PetscReal moments[], Vec globXArray[], LandauCtx *ctx)
303 {
304   PetscErrorCode ierr = (PetscErrorCode)0; // used inside the thread loops
305 
306   PetscFunctionBeginUser;
307   // map back to particles
308   for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
309     PetscCall(PetscInfo(grid_dm[0], "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_vertex_id + 1, num_vertices, v_id_0 + 1, ctx->batch_sz));
310     //PetscPragmaOMP(parallel for)
311     for (PetscInt tid = 0; tid < numthreads; tid++) {
312       const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
313       if (glb_v_id < num_vertices) {
314         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
315           PetscErrorCode ierr_t;
316           ierr_t = PetscInfo(grid_dm[0], "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_vertex_id, v_id, grid, LAND_PACK_IDX(v_id, grid));
317           ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(v_id, grid)], globXArray[LAND_PACK_IDX(v_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]);
318           if (ierr_t) ierr = ierr_t;
319         }
320       }
321     }
322     PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
323     /* Get moments */
324     PetscCall(PetscInfo(grid_dm[0], "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", v_id_0, v_id_0 + numthreads));
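    /* For the target vertex: moments[0..4] = density, z-momentum, kinetic energy, entropy (-sum ww*log(ww)), and density lost to non-positive weights */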
325     for (PetscInt tid = 0; tid < numthreads; tid++) {
326       const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
327       if (glb_v_id == v_target) {
328         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
329           PetscDataType dtype;
330           PetscReal    *wp, *coords;
331           DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
332           PetscInt      npoints, bs = 1;
333           PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
334           PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
335           PetscCall(DMSwarmGetLocalSize(sw, &npoints));
336           for (PetscInt p = 0; p < npoints; p++) {
337             PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
338             for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
339             moments[0] += w;
340             moments[1] += w * ctx->v_0 * coords[p * dim + 1]; // z-momentum
341             moments[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
342           }
343           PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
344           PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
345         }
346         const PetscReal N_inv = 1 / moments[0];
347         PetscCall(PetscInfo(grid_dm[0], "gridToParticles_private [%" PetscInt_FMT "], n = %g\n", v_id, (double)moments[0]));
348         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
349           PetscDataType dtype;
350           PetscReal    *wp, *coords;
351           DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
352           PetscInt      npoints, bs = 1;
353           PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
354           PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
355           PetscCall(DMSwarmGetLocalSize(sw, &npoints));
356           for (PetscInt p = 0; p < npoints; p++) {
357             const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
358             if (w > PETSC_REAL_MIN) {
359               moments[3] -= ww * PetscLogReal(ww);
360               PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "ww (%g) > 1", (double)ww);
361             } else moments[4] -= w; // keep track of density that is lost
362           }
363           PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
364           PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
365         }
366       }
367     } // thread batch
368   } // batch
369   PetscFunctionReturn(PETSC_SUCCESS);
370 }
371 
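// Add a Maxwellian n * (pi*theta)^(-3/2) * exp(-|v|^2/theta), theta = 2*kt_m, to u[0]; a nonzero 'shift' displaces it along the last coordinate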
372 static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscReal shift, PetscScalar *u)
373 {
374   PetscInt  i;
375   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */
376 
377   if (shift != 0.) {
378     v2 = 0;
379     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
380     v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
381     /* evaluate the shifted Maxwellian */
382     u[0] += n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
383   } else {
384     /* compute the exponents, v^2 */
385     for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
386     /* evaluate the Maxwellian */
387     u[0] += n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
388   }
389 }
390 
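// TS post-step monitor: when the diagnostic target vertex is in the current batch, optionally compute particle moments
// and entropy (that block is currently disabled by the "&& 0") and periodically view the target grid solution with
// -ex30_vec_view (and -ex30_vec_view2 for the next grid)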
391 static PetscErrorCode PostStep(TS ts)
392 {
393   PetscInt   n, dim, nDMs, v_id;
394   PetscReal  t;
395   LandauCtx *ctx;
396   Vec        X;
397   PrintCtx  *printCtx;
398   DM         pack;
399   PetscReal  moments[5], e_grid[LANDAU_MAX_GRIDS];
400 
401   PetscFunctionBeginUser;
402   PetscCall(TSGetApplicationContext(ts, &printCtx));
403   if (!printCtx->print && !printCtx->print_entropy) PetscFunctionReturn(PETSC_SUCCESS);
404   ctx = printCtx->ctx;
405   if (printCtx->v_target < printCtx->global_vertex_id_0 || printCtx->v_target >= printCtx->global_vertex_id_0 + ctx->batch_sz) PetscFunctionReturn(PETSC_SUCCESS);
406   for (PetscInt i = 0; i < 5; i++) moments[i] = 0;
407   for (PetscInt i = 0; i < LANDAU_MAX_GRIDS; i++) e_grid[i] = 0;
408   v_id = printCtx->v_target % ctx->batch_sz;
409   PetscCall(TSGetDM(ts, &pack));
410   PetscCall(DMGetDimension(pack, &dim));
411   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
412   PetscCall(TSGetSolution(ts, &X));
413   PetscCall(TSGetStepNumber(ts, &n));
414   PetscCall(TSGetTime(ts, &t));
415   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
416   if (printCtx->print_entropy && printCtx->v_target >= 0 && 0) {
417     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
418       PetscDataType dtype;
419       PetscReal    *wp, *coords;
420       DM            sw = printCtx->globSwarmArray[LAND_PACK_IDX(v_id, grid)];
421       Vec           work, subX = printCtx->globXArray[LAND_PACK_IDX(v_id, grid)];
422       PetscInt      bs, NN;
423       // C-G moments
424       PetscCall(VecDuplicate(subX, &work));
425       PetscCall(gridToParticles(printCtx->grid_dm[grid], sw, subX, work, printCtx->globMpArray[LAND_PACK_IDX(v_id, grid)], printCtx->g_Mass[grid]));
426       PetscCall(VecDestroy(&work));
427       // moments
428       PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
429       PetscCall(DMSwarmGetLocalSize(sw, &NN));
430       PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
431       for (PetscInt pp = 0; pp < NN; pp++) {
432         PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
433         for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
434         moments[0] += w;
435         moments[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
436         moments[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
437         e_grid[grid] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
438       }
439       PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
440       PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
441     }
442     // entropy
443     const PetscReal N_inv = 1 / moments[0];
444     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
445       PetscDataType dtype;
446       PetscReal    *wp, *coords;
447       DM            sw = printCtx->globSwarmArray[LAND_PACK_IDX(v_id, grid)];
448       PetscInt      bs, NN;
449       PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
450       PetscCall(DMSwarmGetLocalSize(sw, &NN));
451       PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
452       for (PetscInt pp = 0; pp < NN; pp++) {
453         PetscReal fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
454         if (w > PETSC_REAL_MIN) {
455           moments[3] -= ww * PetscLogReal(ww);
456         } else moments[4] -= w;
457       }
458       PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
459       PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
460     }
461     PetscCall(PetscInfo(X, "%4d) time %e, Landau particle moments: 0: %18.12e 1: %19.12e 2: %18.12e entropy: %e loss %e. energy = %e + %e + %e\n", (int)n, (double)t, (double)moments[0], (double)moments[1], (double)moments[2], (double)moments[3], (double)(moments[4] / moments[0]), (double)e_grid[0], (double)e_grid[1], (double)e_grid[2]));
462   }
463   if (printCtx->print && printCtx->g_target >= 0) {
464     PetscInt         grid   = printCtx->g_target, id;
465     static PetscReal last_t = -100000, period = .5;
466     if (last_t == -100000) last_t = -period + t;
467     if (t >= last_t + period) {
468       last_t = t;
469       PetscCall(DMGetOutputSequenceNumber(ctx->plex[grid], &id, NULL));
470       PetscCall(DMSetOutputSequenceNumber(ctx->plex[grid], id + 1, t));
471       PetscCall(VecViewFromOptions(printCtx->globXArray[LAND_PACK_IDX(v_id % ctx->batch_sz, grid)], NULL, "-ex30_vec_view"));
472       if (ctx->num_grids > grid + 1) {
473         PetscCall(DMSetOutputSequenceNumber(ctx->plex[grid + 1], id + 1, t));
474         PetscCall(VecViewFromOptions(printCtx->globXArray[LAND_PACK_IDX(v_id % ctx->batch_sz, grid + 1)], NULL, "-ex30_vec_view2"));
475       }
476       PetscCall(PetscInfo(X, "%4d) time %e View\n", (int)n, (double)t));
477     }
478   }
479   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
480   PetscFunctionReturn(PETSC_SUCCESS);
481 }
482 
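// Driver: for each batch of spatial vertices, create particles on every velocity grid (threaded), deposit them on the
// grids (particle mass matrix plus grid mass solve), advance the batched Landau collision operator with TSSolve,
// optionally resample onto a regular particle grid, and project the solution back to particles while accumulating
// moments for the diagnostic target vertex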
483 PetscErrorCode go(TS ts, Vec X, const PetscInt num_vertices, const PetscInt a_Np, const PetscInt dim, const PetscInt v_target, const PetscInt g_target, PetscReal shift, PetscBool use_uniform_particle_grid)
484 {
485   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
486   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
487   KSP            t_ksp[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
488   Vec            t_fhat[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
489   PetscInt       nDMs;
490   PetscErrorCode ierr = (PetscErrorCode)0; // used inside the thread loops
491 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
492   PetscInt numthreads = PetscNumOMPThreads;
493 #else
494   PetscInt numthreads = 1;
495 #endif
496   LandauCtx *ctx;
497   Vec       *globXArray;
498   PetscReal  moments_0[5], moments_1a[5], moments_1b[5], dt_init;
499   PrintCtx  *printCtx;
500 
501   PetscFunctionBeginUser;
502   PetscCheck(numthreads <= EX30_MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, EX30_MAX_NUM_THRDS);
503   PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of threads %" PetscInt_FMT " must be positive (max %d)", numthreads, EX30_MAX_NUM_THRDS);
504   PetscCall(TSGetDM(ts, &pack));
505   PetscCall(DMGetApplicationContext(pack, &ctx));
506   PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT "  mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
507   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
508   PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices) %d DMs\n", ctx->num_grids, ctx->batch_sz, num_vertices, (int)nDMs));
509   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
510   PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
511   PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
512   // print ctx
513   PetscCall(PetscNew(&printCtx));
514   PetscCall(TSSetApplicationContext(ts, printCtx));
515   printCtx->v_target       = v_target;
516   printCtx->g_target       = g_target;
517   printCtx->ctx            = ctx;
518   printCtx->globSwarmArray = globSwarmArray;
519   printCtx->grid_dm        = grid_dm;
520   printCtx->globMpArray    = globMpArray;
521   printCtx->g_Mass         = g_Mass;
522   printCtx->globXArray     = globXArray;
523   printCtx->print_entropy  = PETSC_FALSE;
524   PetscOptionsBegin(PETSC_COMM_SELF, "", "Print Options", "DMPLEX");
525   PetscCall(PetscOptionsBool("-print_entropy", "Print entropy and moments at each time step", "ex30.c", printCtx->print_entropy, &printCtx->print_entropy, NULL));
526   PetscOptionsEnd();
527   // view
528   PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
529   if (ctx->num_grids > g_target + 1) { PetscCall(DMViewFromOptions(ctx->plex[g_target + 1], NULL, "-ex30_dm_view2")); }
530   // create mesh mass matrices
531   PetscCall(VecZeroEntries(X));
532   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
533   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // add the same particles for all grids
534     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
535     DM           dm   = ctx->plex[grid];
536     PetscSection s;
537     grid_dm[grid] = dm;
538     PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
539     //
540     PetscCall(DMGetLocalSection(dm, &s));
541     PetscCall(DMPlexCreateClosureIndex(dm, s));
542     for (PetscInt tid = 0; tid < numthreads; tid++) {
543       PC pc;
544       PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
545       PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
546       PetscCall(KSPSetType(t_ksp[grid][tid], KSPCG));
547       PetscCall(KSPGetPC(t_ksp[grid][tid], &pc));
548       PetscCall(PCSetType(pc, PCJACOBI));
549       PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
550       PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
551       PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
552     }
553   }
554   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
555   PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
556   // loop over all vertices in chunks that are batched for TSSolve
557   for (PetscInt i = 0; i < 5; i++) moments_0[i] = moments_1a[i] = moments_1b[i] = 0;
558   for (PetscInt global_vertex_id_0 = 0; global_vertex_id_0 < num_vertices; global_vertex_id_0 += ctx->batch_sz, shift /= 2) { // outer vertex loop
559     PetscCall(TSSetTime(ts, 0));
560     PetscCall(TSSetStepNumber(ts, 0));
561     PetscCall(TSSetTimeStep(ts, dt_init));
562     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
563     printCtx->global_vertex_id_0 = global_vertex_id_0;
564     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
565       PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], "rho"));
566       printCtx->print = PETSC_TRUE;
567     } else printCtx->print = PETSC_FALSE;
568     // create fake particles in batches with threads
569     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
570       PetscReal *xx_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS] /* , radiuses[80000] */;
571       PetscInt   Np_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
572       // make particles
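      // Two initializations: with use_uniform_particle_grid the particles sit on a regular Npi x Npj x Npk grid and carry
      // Maxwellian weights; otherwise positions are drawn from a Gaussian (Box-Muller) of width sigma and each particle
      // carries the same weight n/NNreal (times a mass-ratio factor). A few zero-weight particles are appended so the
      // swarm's bounding box covers the whole velocity domain.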
573       for (PetscInt tid = 0; tid < numthreads; tid++) {
574         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
575         if (glb_v_id < num_vertices) {                                                     // the ragged edge (in last batch)
576           PetscInt Npp0 = a_Np + (glb_v_id % (a_Np / 10 + 1)), nTargetP[LANDAU_MAX_GRIDS]; // number of particles in each dim, with load imbalance
577           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {                         // add the same particles for all grids
578             // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
579             const PetscReal kT_m  = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0);                      /* theta = 2kT/mc^2 per species */
580             PetscReal       lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
581             PetscInt        Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
582             PetscRandom     rand;
583             PetscReal       sigma = ctx->thermal_speed[grid] / ctx->thermal_speed[0], p2_shift = grid == 0 ? shift : -shift; // symmetric shift of e vs ions
584             PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &rand));
585             PetscCall(PetscRandomSetInterval(rand, 0., 1.));
586             PetscCall(PetscRandomSetFromOptions(rand));
587             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
588             else Npi = Npj = Npk = Npp0;
589             // User: use glb_v_id to index into your data
590             const PetscInt NNreal = Npi * Npj * Npk, NN = NNreal + (dim == 2 ? 3 : 6); // make room for bounding box
591             Np_t[grid][tid] = NN;
592             if (glb_v_id == v_target) nTargetP[grid] = NN;
593             PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
594             hp[0] = (hi[0] - lo[0]) / Npi;
595             hp[1] = (hi[1] - lo[1]) / Npj;
596             hp[2] = (hi[2] - lo[2]) / Npk;
597             if (dim == 2) hp[2] = 1;
598             PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
599             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
600             PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_v_id, grid, NN, v_target));
601             for (PetscInt pj = 0, pp = 0; pj < Npj; pj++) {
602               for (PetscInt pk = 0; pk < Npk; pk++) {
603                 for (PetscInt pi = 0; pi < Npi; pi++, pp++) {
604                   PetscReal p_shift   = p2_shift;
605                   wp_t[grid][tid][pp] = 0;
606                   if (use_uniform_particle_grid) {
607                     xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
608                     yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
609                     if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
610                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
611                     p_shift *= ctx->thermal_speed[grid] / ctx->v_0;
612                     if (ctx->sphere && PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp])) > 0.92 * hi[0]) {
613                       wp_t[grid][tid][pp] = 0;
614                     } else {
615                       maxwellian(dim, x, kT_m, vole, p_shift, &wp_t[grid][tid][pp]);
616                       if (ctx->num_grids == 1 && shift != 0) {                          // bi-maxwellian, electron plasma
617                         maxwellian(dim, x, kT_m, vole, -p_shift, &wp_t[grid][tid][pp]); // symmetric shift of electron plasma
618                       }
619                     }
620                   } else {
621                     PetscReal u1, u2;
622                     do {
623                       do {
624                         PetscCall(PetscRandomGetValueReal(rand, &u1));
625                       } while (u1 == 0);
626                       PetscCall(PetscRandomGetValueReal(rand, &u2));
627                       // Box-Muller transform: Gaussian samples with standard deviation sigma
628                       PetscReal mag       = sigma * PetscSqrtReal(-2.0 * PetscLogReal(u1)); // is this the same scale grid Maxwellian? t_therm = sigma
629                       xx_t[grid][tid][pp] = mag * PetscCosReal(2.0 * PETSC_PI * u2);
630                       yy_t[grid][tid][pp] = mag * PetscSinReal(2.0 * PETSC_PI * u2);
631                       if (dim == 2 && xx_t[grid][tid][pp] < lo[0]) xx_t[grid][tid][pp] = -xx_t[grid][tid][pp];
632                       if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
633                       if (!ctx->sphere) {
634                         if (dim == 2 && xx_t[grid][tid][pp] < 0) xx_t[grid][tid][pp] = -xx_t[grid][tid][pp]; // ???
635                         else if (dim == 3) {
636                           while (zz_t[grid][tid][pp] >= hi[2] || zz_t[grid][tid][pp] <= lo[2]) zz_t[grid][tid][pp] *= .9;
637                         }
638                         while (xx_t[grid][tid][pp] >= hi[0] || xx_t[grid][tid][pp] <= lo[0]) xx_t[grid][tid][pp] *= .9;
639                         while (yy_t[grid][tid][pp] >= hi[1] || yy_t[grid][tid][pp] <= lo[1]) yy_t[grid][tid][pp] *= .9;
640                       } else { // 2D
641                         //if (glb_v_id == v_target && pp < 80000) radiuses[pp] = PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp]));
642                         while (PetscSqrtReal(PetscSqr(xx_t[grid][tid][pp]) + PetscSqr(yy_t[grid][tid][pp])) > 0.92 * hi[0]) { // safety factor for facets of sphere
643                           xx_t[grid][tid][pp] *= .9;
644                           yy_t[grid][tid][pp] *= .9;
645                         }
646                       }
647                       if (ctx->num_grids == 1 && pp % 2 == 0) p_shift = 0; // one species, split bi-max
648                       p_shift *= ctx->thermal_speed[grid] / ctx->v_0;
649                       if (dim == 3) zz_t[grid][tid][pp] += p_shift;
650                       else yy_t[grid][tid][pp] += p_shift;
651                       wp_t[grid][tid][pp] += ctx->n[grid] / NNreal * PetscSqrtReal(ctx->masses[ctx->species_offset[grid]] / ctx->masses[0]);
652                       if (p_shift <= 0) break; // add bi-max for electron plasma only
653                       p_shift = -p_shift;
654                     } while (ctx->num_grids == 1); // add bi-max for electron plasma only
655                   }
656                   {
657                     if (glb_v_id == v_target) {
658                       PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
659                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1, w = fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
660                       for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
661                       moments_0[0] += w;                   // not thread safe
662                       moments_0[1] += w * ctx->v_0 * x[1]; // z-momentum
663                       moments_0[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
664                     }
665                   }
666                 }
667               }
668             }
669             if (dim == 2) { // fix bounding box
670               PetscInt pp           = NNreal;
671               wp_t[grid][tid][pp]   = 0;
672               xx_t[grid][tid][pp]   = 1.e-7;
673               yy_t[grid][tid][pp++] = hi[1] - 5.e-7;
674               wp_t[grid][tid][pp]   = 0;
675               xx_t[grid][tid][pp]   = hi[0] - 5.e-7;
676               yy_t[grid][tid][pp++] = 0;
677               wp_t[grid][tid][pp]   = 0;
678               xx_t[grid][tid][pp]   = 1.e-7;
679               yy_t[grid][tid][pp++] = lo[1] + 5.e-7;
680             } else {
681               const PetscInt p0 = NNreal;
682               for (PetscInt pj = 0; pj < 6; pj++) { xx_t[grid][tid][p0 + pj] = yy_t[grid][tid][p0 + pj] = zz_t[grid][tid][p0 + pj] = wp_t[grid][tid][p0 + pj] = 0; }
683               xx_t[grid][tid][p0 + 0] = lo[0];
684               xx_t[grid][tid][p0 + 1] = hi[0];
685               yy_t[grid][tid][p0 + 2] = lo[1];
686               yy_t[grid][tid][p0 + 3] = hi[1];
687               zz_t[grid][tid][p0 + 4] = lo[2];
688               zz_t[grid][tid][p0 + 5] = hi[2];
689             }
690             PetscCall(PetscRandomDestroy(&rand));
691           }
692           // entropy init, need global n
693           if (glb_v_id == v_target) {
694             const PetscReal N_inv = 1 / moments_0[0];
695             PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particels\n", glb_v_id, nTargetP[0]));
696             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
697               const PetscInt NN = nTargetP[grid];
698               for (PetscInt pp = 0; pp < NN; pp++) {
699                 const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * xx_t[grid][tid][pp] : 1, w = fact * ctx->n_0 * ctx->masses[ctx->species_offset[grid]] * wp_t[grid][tid][pp], ww = w * N_inv;
700                 if (w > PETSC_REAL_MIN) {
701                   moments_0[3] -= ww * PetscLogReal(ww);
702                   PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
703                 } else moments_0[4] -= w;
704               }
705             } // grid
706           } // target
707         } // active
708       } // threads
709       /* Create particle swarm */
710       for (PetscInt tid = 0; tid < numthreads; tid++) {
711         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
712         if (glb_v_id < num_vertices) {                             // the ragged edge of the last batch
713           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
714             PetscSection section;
715             PetscInt     Nf;
716             DM           dm = grid_dm[grid];
717             PetscCall(DMGetLocalSection(dm, &section));
718             PetscCall(PetscSectionGetNumFields(section, &Nf));
719             PetscCheck(Nf == 1, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only one species per grid supported -- todo");
720             PetscCall(DMViewFromOptions(dm, NULL, "-dm_view"));
721             PetscCall(PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local block index %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid)));
722             PetscCall(createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
723           }
724         } // active
725       } // threads
726       PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Only support one species per grid");
727       // make globMpArray
728       PetscPragmaOMP(parallel for)
729       for (PetscInt tid = 0; tid < numthreads; tid++) {
730         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
731         if (glb_v_id < num_vertices) {
732           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
733             // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
734             PetscErrorCode ierr_t;
735             DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
736             ierr_t            = PetscInfo(pack, "makeSwarm %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
737             ierr_t            = makeSwarm(sw, dim, Np_t[grid][tid], xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid]);
738             if (ierr_t) ierr = ierr_t;
739           }
740         }
741       }
742       for (PetscInt tid = 0; tid < numthreads; tid++) {
743         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
744         if (glb_v_id < num_vertices) {
745           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
746             DM dm = grid_dm[grid];
747             DM sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
748             PetscCall(PetscInfo(pack, "createMp %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid)));
749             PetscCall(createMp(dm, sw, &globMpArray[LAND_PACK_IDX(v_id, grid)]));
750             PetscCall(MatViewFromOptions(globMpArray[LAND_PACK_IDX(v_id, grid)], NULL, "-mp_mat_view"));
751           }
752         }
753       }
754       // p --> g: set X
755       // PetscPragmaOMP(parallel for)
756       for (PetscInt tid = 0; tid < numthreads; tid++) {
757         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
758         if (glb_v_id < num_vertices) {
759           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
760             PetscErrorCode ierr_t;
761             DM             dm   = grid_dm[grid];
762             DM             sw   = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
763             Vec            subX = globXArray[LAND_PACK_IDX(v_id, grid)], work = t_fhat[grid][tid];
764             ierr_t = PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
765             ierr_t = particlesToGrid(dm, sw, tid, dim, wp_t[grid][tid], subX, globMpArray[LAND_PACK_IDX(v_id, grid)]);
766             if (ierr_t) ierr = ierr_t;
767             // u = M^_1 f_w
768             ierr_t = VecCopy(subX, work);
769             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
770             if (ierr_t) ierr = ierr_t;
771           }
772         }
773       }
774       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
775       /* Cleanup */
776       for (PetscInt tid = 0; tid < numthreads; tid++) {
777         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
778         if (glb_v_id < num_vertices) {
779           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
780             PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
781           }
782         } // active
783       } // threads
784     } // (fake) particle loop
785     // standard view of initial conditions
786     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
787       PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target], 0, 0.0));
788       PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
789       if (ctx->num_grids > g_target + 1) {
790         PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target + 1], 0, 0.0));
791         PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target + 1)], NULL, "-ex30_vec_view2"));
792       }
793       PetscCall(MatViewFromOptions(globMpArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_mass_mat_view"));
794       PetscCall(DMViewFromOptions(globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_sw_view"));
795       PetscCall(DMSwarmViewXDMF(globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], "initial_swarm.xmf")); // writes a file by default!!!
796     }
797     // coarse graining moments_1a, bring f back from grid before advance
798     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz && printCtx->print_entropy) {
799       const PetscInt v_id = v_target % ctx->batch_sz;
800       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
801         PetscDataType dtype;
802         PetscReal    *wp, *coords;
803         DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
804         Vec           work, subX = globXArray[LAND_PACK_IDX(v_id, grid)];
805         PetscInt      bs, NN;
806         // C-G moments
807         PetscCall(VecDuplicate(subX, &work));
808         PetscCall(gridToParticles(grid_dm[grid], sw, subX, work, globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]));
809         PetscCall(VecDestroy(&work));
810         // moments
811         PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
812         PetscCall(DMSwarmGetLocalSize(sw, &NN));
813         PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
814         for (PetscInt pp = 0; pp < NN; pp++) {
815           PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
816           for (PetscInt i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
817           moments_1a[0] += w;
818           moments_1a[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
819           moments_1a[2] += w * 0.5 * ctx->v_0 * ctx->v_0 * v2;
820         }
821         PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
822         PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
823       }
824       // entropy
825       const PetscReal N_inv = 1 / moments_1a[0];
826       PetscCall(PetscInfo(pack, "Entropy batch %" PetscInt_FMT " of %" PetscInt_FMT ", n = %g\n", v_target, num_vertices, (double)(1 / N_inv)));
827       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
828         PetscDataType dtype;
829         PetscReal    *wp, *coords;
830         DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
831         PetscInt      bs, NN;
832         PetscCall(DMSwarmGetLocalSize(sw, &NN));
833         PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp));
834         PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
835         for (PetscInt pp = 0; pp < NN; pp++) {
836           PetscReal fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
837           if (w > PETSC_REAL_MIN) {
838             moments_1a[3] -= ww * PetscLogReal(ww);
839             PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
840           } else moments_1a[4] -= w;
841         }
842         PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
843         PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
844       }
845     }
846     // restore vector
847     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
848     // view initial grid
849     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) { PetscCall(DMPlexLandauPrintNorms(X, 0)); }
850     // advance
851     PetscCall(TSSetSolution(ts, X));
852     PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT "\n", global_vertex_id_0, global_vertex_id_0 + ctx->batch_sz));
853     PetscCall(TSSetPostStep(ts, PostStep));
854     PetscCall(PostStep(ts));
855     PetscCall(TSSolve(ts, X));
856     // view
857     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
858     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
859       /* Visualize original particle field */
860       DM  sw = globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)];
861       Vec f;
862       PetscCall(DMSetOutputSequenceNumber(sw, 0, 0.0));
863       PetscCall(DMViewFromOptions(grid_dm[g_target], NULL, "-weights_dm_view"));
864       PetscCall(DMViewFromOptions(sw, NULL, "-weights_sw_view"));
865       PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &f));
866       PetscCall(PetscObjectSetName((PetscObject)f, "weights"));
867       PetscCall(VecViewFromOptions(f, NULL, "-weights_vec_view"));
868       PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &f));
869       //
870       PetscCall(DMPlexLandauPrintNorms(X, 1));
871     }
872     if (!use_uniform_particle_grid) { // resample to uniform grid
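      // Replace the randomly sampled particles with a regular particle grid containing at least as many particles as grid
      // vertices (so the normal-equations projection stays well posed), then rebuild the swarms and particle mass matrices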
873       for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
874         PetscReal *xx_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
875         PetscInt   Np_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
876         for (PetscInt tid = 0; tid < numthreads; tid++) {
877           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
878           if (glb_v_id < num_vertices) {
879             // create uniform grid w/o weights & smaller
880             PetscInt Npp0 = (a_Np + (glb_v_id % (a_Np / 10 + 1))) / 2, Nv; // 1/2 of uniform particle grid size
881             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
882               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++)
883               PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3];
884               PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1, NN;
885               // delete old particles and particle mass matrix
886               PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
887               PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(v_id, grid)]));
888               // create fake particles in batches with threads
889               PetscCall(MatGetLocalSize(g_Mass[grid], &Nv, NULL));
890               if (dim == 2) lo[0] = 0;
891               else Npi = Npj = Npk = Npp0;
892               NN = Npi * Npj * Npk + (dim == 2 ? 3 : 6); // make a regular grid of particles Npp x Npp
893               while (Npi * Npj * Npk < Nv) {             // keep at least as many particles as grid vertices so the projection stays stable (no least squares)
894                 Npi++;
895                 Npj++;
896                 Npk++;
897                 NN = Npi * Npj * Npk + (dim == 2 ? 3 : 6);
898               }
899               Np_t[grid][tid] = NN;
900               PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
901               hp[0] = (hi[0] - lo[0]) / Npi;
902               hp[1] = (hi[1] - lo[1]) / Npj;
903               hp[2] = (hi[2] - lo[2]) / Npk;
904               if (dim == 2) hp[2] = 1;
905               PetscCall(PetscInfo(pack, "Resampling %d particles, %d vertices\n", (int)NN, (int)Nv)); // temp
906               for (PetscInt pj = 0, pp = 0; pj < Npj; pj++) {
907                 for (PetscInt pk = 0; pk < Npk; pk++) {
908                   for (PetscInt pi = 0; pi < Npi; pi++, pp++) {
909                     wp_t[grid][tid][pp] = 0;
910                     xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
911                     yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
912                     if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
913                   }
914                 }
915               }
916               if (dim == 2) { // fix bounding box
917                 PetscInt pp           = NN - 3;
918                 wp_t[grid][tid][pp]   = 0;
919                 xx_t[grid][tid][pp]   = 1.e-7;
920                 yy_t[grid][tid][pp++] = hi[1] - 5.e-7;
921                 wp_t[grid][tid][pp]   = 0;
922                 xx_t[grid][tid][pp]   = hi[0] - 5.e-7;
923                 yy_t[grid][tid][pp++] = 0;
924                 wp_t[grid][tid][pp]   = 0;
925                 xx_t[grid][tid][pp]   = 1.e-7;
926                 yy_t[grid][tid][pp++] = lo[1] + 5.e-7;
927               } else {
928                 const PetscInt p0 = NN - 6;
929                 for (PetscInt pj = 0; pj < 6; pj++) { xx_t[grid][tid][p0 + pj] = yy_t[grid][tid][p0 + pj] = zz_t[grid][tid][p0 + pj] = wp_t[grid][tid][p0 + pj] = 0; }
930                 xx_t[grid][tid][p0 + 0] = lo[0];
931                 xx_t[grid][tid][p0 + 1] = hi[0];
932                 yy_t[grid][tid][p0 + 2] = lo[1];
933                 yy_t[grid][tid][p0 + 3] = hi[1];
934                 zz_t[grid][tid][p0 + 4] = lo[2];
935                 zz_t[grid][tid][p0 + 5] = hi[2];
936               }
937             }
938           } // active
939         } // threads
940         /* Create particle swarm */
941         for (PetscInt tid = 0; tid < numthreads; tid++) {
942           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
943           if (glb_v_id < num_vertices) {                             // the ragged edge of the last batch
944             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
945               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
946               PetscErrorCode ierr_t;
947               PetscSection   section;
948               PetscInt       Nf;
949               DM             dm = grid_dm[grid];
950               ierr_t            = DMGetLocalSection(dm, &section);
951               ierr_t            = PetscSectionGetNumFields(section, &Nf);
952               if (Nf != 1) ierr_t = (PetscErrorCode)9999;
953               else {
954                 ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
955                 ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local block index %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
956                 ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(v_id, grid)]);
957               }
958               if (ierr_t) ierr = ierr_t;
959             }
960           } // active
961         } // threads
962         PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
963         PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
964         // make globMpArray
965         PetscPragmaOMP(parallel for)
966         for (PetscInt tid = 0; tid < numthreads; tid++) {
967           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
968           if (glb_v_id < num_vertices) {
969             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add the same particles for all grids
970               // for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) -- loop over species for Nf > 1 -- TODO
971               PetscErrorCode ierr_t;
972               DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
973               ierr_t            = PetscInfo(pack, "makeSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
974               ierr_t            = makeSwarm(sw, dim, Np_t[grid][tid], xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid]);
975               if (ierr_t) ierr = ierr_t;
976             }
977           } // active
978         } // threads
979         // create particle mass matrices
980         //PetscPragmaOMP(parallel for)
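        // Note: this loop is left serial (the OpenMP pragma above is commented out); the mass-matrix
        // assembly done by createMp() is presumably not thread-safe here.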
981         for (PetscInt tid = 0; tid < numthreads; tid++) {
982           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
983           if (glb_v_id < num_vertices) {
984             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // one particle mass matrix per (vertex, grid) block
985               PetscErrorCode ierr_t;
986               DM             dm = grid_dm[grid];
987               DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
988               ierr_t            = PetscInfo(pack, "createMp [%" PetscInt_FMT ".%" PetscInt_FMT "] for block %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
989               ierr_t            = createMp(dm, sw, &globMpArray[LAND_PACK_IDX(v_id, grid)]);
990               if (ierr_t) ierr = ierr_t;
991             }
992           } // active
993         } // threads
994         PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in OMP loop. ierr = %d", (int)ierr);
995         /* Cleanup */
996         for (PetscInt tid = 0; tid < numthreads; tid++) {
997           const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
998           if (glb_v_id < num_vertices) {
999             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // free the per-grid coordinate/weight buffers
1000               PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
1001             }
1002           } // active
1003         } // threads
1004       } // batch
1005       // view
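      // For the target (vertex, grid) block only: dump the resampled swarm and its weight field "w_q"
      // through the -resampled_weights_sw_view / -resampled_weights_vec_view options and write resampled.xmf.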
1006       if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
1007         /* Visualize particle field */
1008         DM  sw = globSwarmArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)];
1009         Vec f;
1010         PetscCall(DMSetOutputSequenceNumber(sw, 0, 0.0));
1011         PetscCall(DMViewFromOptions(sw, NULL, "-resampled_weights_sw_view"));
1012         PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &f));
1013         PetscCall(PetscObjectSetName((PetscObject)f, "resampled_weights"));
1014         PetscCall(VecViewFromOptions(f, NULL, "-resampled_weights_vec_view"));
1015         PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &f));
1016         PetscCall(DMSwarmViewXDMF(sw, "resampled.xmf"));
1017       }
1018     } // !uniform
1019     // particles to grid, compute moments and entropy, for target vertex only
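    // Compare three sets of moments (density, parallel momentum, energy, entropy, percentage of negative
    // weights): moments_0 from the initial particles, moments_1a after the particle->grid projection
    // ("coarse-graining"), and moments_1b after the Landau step, mapped back to particles by
    // gridToParticles_private() below.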
1020     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz && printCtx->print_entropy) {
1021       PetscReal energy_error_rel;
1022       PetscCall(gridToParticles_private(grid_dm, globSwarmArray, dim, v_target, numthreads, num_vertices, global_vertex_id_0, globMpArray, g_Mass, t_fhat, moments_1b, globXArray, ctx));
1023       energy_error_rel = PetscAbsReal(moments_1b[2] - moments_0[2]) / moments_0[2];
1024       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Particle Moments:\t number density      momentum (par)     energy             entropy            negative weights  : # OMP threads %g\n", (double)numthreads));
1025       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tInitial:         %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_0[0], (double)moments_0[1], (double)moments_0[2], (double)moments_0[3], 100 * (double)(moments_0[4] / moments_0[0])));
1026       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tCoarse-graining: %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_1a[0], (double)moments_1a[1], (double)moments_1a[2], (double)moments_1a[3], 100 * (double)(moments_1a[4] / moments_0[0])));
1027       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\tLandau:          %18.12e %19.12e %18.12e %18.12e %g %%\n", (double)moments_1b[0], (double)moments_1b[1], (double)moments_1b[2], (double)moments_1b[3], 100 * (double)(moments_1b[4] / moments_0[0])));
1028       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Coarse-graining entropy generation = %e ; Landau entropy generation = %e\n", (double)(moments_1a[3] - moments_0[3]), (double)(moments_1b[3] - moments_0[3])));
1029       PetscCall(PetscPrintf(PETSC_COMM_WORLD, "(relative) energy conservation: Coarse-graining = %e, Landau = %e (%g %d)\n", (double)(moments_1a[2] - moments_0[2]) / (double)moments_0[2], (double)energy_error_rel, (double)PetscLog10Real(energy_error_rel), (int)(PetscLog10Real(energy_error_rel) + .5)));
1030     }
1031     // restore vector
1032     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
1033     // cleanup
1034     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
1035       for (PetscInt tid = 0; tid < numthreads; tid++) {
1036         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
1037         if (glb_v_id < num_vertices) {
1038           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1039             PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
1040             PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(v_id, grid)]));
1041           }
1042         }
1043       }
1044     }
1045   } // user batch, not used
1046   /* Cleanup */
1047   PetscCall(PetscFree(globXArray));
1048   PetscCall(PetscFree(globSwarmArray));
1049   PetscCall(PetscFree(globMpArray));
1050   PetscCall(PetscFree(printCtx));
1051   // clean up mass matrices
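  // g_Mass[] holds the per-grid (finite element) mass matrices; t_fhat[][] and t_ksp[][] are the
  // per-thread work vectors and KSP solvers allocated earlier in this routine.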
1052   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // per-grid mass matrix and per-thread work vectors/solvers
1053     PetscCall(MatDestroy(&g_Mass[grid]));
1054     for (PetscInt tid = 0; tid < numthreads; tid++) {
1055       PetscCall(VecDestroy(&t_fhat[grid][tid]));
1056       PetscCall(KSPDestroy(&t_ksp[grid][tid]));
1057     }
1058   }
1059   PetscFunctionReturn(PETSC_SUCCESS);
1060 }
1061 
1062 int main(int argc, char **argv)
1063 {
1064   DM         pack;
1065   Vec        X;
1066   PetscInt   dim = 2, num_vertices = 1, Np = 10, v_target = 0, g_target = 0;
1067   TS         ts;
1068   Mat        J;
1069   LandauCtx *ctx;
1070   PetscReal  shift                     = 0;
1071   PetscBool  use_uniform_particle_grid = PETSC_TRUE;
1072 
1073   PetscFunctionBeginUser;
1074   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
1075   // process args
1076   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
1077   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
1078   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", num_vertices, &num_vertices, NULL));
1079   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
1080   PetscCall(PetscOptionsBool("-use_uniform_particle_grid", "Use uniform particle grid", "ex30.c", use_uniform_particle_grid, &use_uniform_particle_grid, NULL));
1081   PetscCall(PetscOptionsInt("-vertex_view_target", "Global vertex for diagnostics", "ex30.c", v_target, &v_target, NULL));
1082   PetscCall(PetscOptionsReal("-e_shift", "Bi-Maxwellian shift", "ex30.c", shift, &shift, NULL));
1083   PetscCheck(v_target < num_vertices, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Vertex to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, v_target, num_vertices);
1084   PetscCall(PetscOptionsInt("-grid_view_target", "Grid to view with diagnostics", "ex30.c", g_target, &g_target, NULL));
1085   PetscOptionsEnd();
1086   /* Create a mesh */
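  // DMPlexLandauCreateVelocitySpace() builds the velocity-space mesh(es) for all grids/batches and returns
  // the packed solution vector X, the Jacobian J, and the composite DM "pack"; the LandauCtx is attached as
  // the application context and retrieved below.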
1087   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
1088   PetscCall(DMGetApplicationContext(pack, &ctx));
1089   PetscCall(DMSetUp(pack));
1090   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
1091   PetscCheck(g_target < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, g_target, ctx->num_grids);
1092   PetscCheck(ctx->batch_view_idx == v_target % ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Global view index %" PetscInt_FMT " mod batch size %" PetscInt_FMT " != ctx->batch_view_idx %" PetscInt_FMT, v_target, ctx->batch_sz, ctx->batch_view_idx);
1093   // PetscCheck(!use_uniform_particle_grid || !ctx->sphere, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Can not use -use_uniform_particle_grid and -dm_landau_sphere");
1094   /* Create timestepping solver context */
1095   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
1096   PetscCall(TSSetDM(ts, pack));
1097   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
1098   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
1099   PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
1100   PetscCall(TSSetFromOptions(ts));
1101   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
1102   // do particle advance
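  // go() (defined above) drives the PIC cycle, roughly: seed particles for each spatial vertex, project
  // them onto the velocity-space grids, advance the Landau collision operator with the TS, and map the
  // result back to the particles, with optional moment/entropy diagnostics for the target vertex.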
1103   PetscCall(go(ts, X, num_vertices, Np, dim, v_target, g_target, shift, use_uniform_particle_grid));
1104   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
1105   /* clean up */
1106   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
1107   PetscCall(TSDestroy(&ts));
1108   PetscCall(VecDestroy(&X));
1109   PetscCall(PetscFinalize());
1110   return 0;
1111 }
1112 
1113 /*TEST
1114 
1115   build:
1116     requires: !complex
1117 
1118   testset:
1119     requires: double defined(PETSC_USE_DMLANDAU_2D)
1120     output_file: output/ex30_0.out
1121     args: -dim 2 -petscspace_degree 3 -dm_landau_num_species_grid 1,1,1 -dm_refine 1 -number_particles_per_dimension 20 \
1122           -dm_landau_batch_size 4 -number_spatial_vertices 6 -vertex_view_target 5 -grid_view_target 1 -dm_landau_batch_view_idx 1 \
1123           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
1124           -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu -ftop_ksp_error_if_not_converged \
1125           -ksp_type gmres -ksp_error_if_not_converged -dm_landau_verbose 4 -print_entropy \
1126           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12 -ptof_ksp_error_if_not_converged \
1127           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12 \
1128           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler
1129     test:
1130       suffix: cpu
1131       args: -dm_landau_device_type cpu -pc_type jacobi
1132     test:
1133       suffix: kokkos
1134       # failed on Sunspot@ALCF with sycl
1135       requires: kokkos_kernels !openmp !sycl
1136       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi
1137 
1138   testset:
1139     requires: double !defined(PETSC_USE_DMLANDAU_2D)
1140     output_file: output/ex30_3d.out
1141     args: -dim 3 -petscspace_degree 2 -dm_landau_num_species_grid 1,1 -dm_refine 0 -number_particles_per_dimension 10 -dm_plex_hash_location \
1142           -dm_landau_batch_size 1 -number_spatial_vertices 1 -vertex_view_target 0 -grid_view_target 0 -dm_landau_batch_view_idx 0 \
1143           -dm_landau_n 1.000018,1 -dm_landau_thermal_temps 2,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1 \
1144           -ftop_ksp_type cg -ftop_pc_type jacobi -ftop_ksp_rtol 1e-12 -ftop_ksp_error_if_not_converged -ksp_type preonly -pc_type lu -ksp_error_if_not_converged \
1145           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12 -ptof_ksp_error_if_not_converged \
1146           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12 \
1147           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -print_entropy
1148     test:
1149       suffix: cpu_3d
1150       args: -dm_landau_device_type cpu
1151     test:
1152       suffix: kokkos_3d
1153       requires: kokkos_kernels !openmp
1154       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi
1155 
1156   test:
1157     suffix: conserve
1158     requires: !complex double defined(PETSC_USE_DMLANDAU_2D) !cuda
1159     args: -dm_landau_batch_size 4 -dm_refine 0 -dm_landau_num_species_grid 1 -dm_landau_thermal_temps 1 -petscspace_degree 3 -snes_converged_reason -ts_type beuler -ts_dt .1 \
1160           -ts_max_steps 1 -ksp_type preonly -ksp_error_if_not_converged -snes_rtol 1e-14 -snes_stol 1e-14 -dm_landau_device_type cpu -number_particles_per_dimension 20 \
1161           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-14 -ptof_ksp_error_if_not_converged -pc_type lu -dm_landau_simplex 1 -use_uniform_particle_grid false -dm_landau_sphere -print_entropy -number_particles_per_dimension 50 -ftop_ksp_type cg -ftop_pc_type jacobi -ftop_ksp_rtol 1e-14
1162 
1163 TEST*/
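
/*
   Note: the cases above are normally run through the PETSc test harness; an individual configuration can
   typically be invoked from $PETSC_DIR with something like

       make -f gmakefile test search='ts_tests-ex30_cpu'

   (the search name follows the <dir>-<example>_<suffix> convention and is illustrative here).
*/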
1164