static char help[] = "Grid-based Landau collision operator with a PIC interface and OpenMP setup. (one species per grid)\n";

/*
   Supports 2.5V with axisymmetric coordinates
     - r,z coordinates
     - Domain and species data are input by the Landau operator
     - "radius" for each grid, normalized with the electron thermal velocity
     - Domain: (0,radius) x (-radius,radius), thus the first coordinate x[0] is the perpendicular velocity and a 2*pi*x[0] factor is added for the axisymmetric volume element
   Supports full 3V

 */
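/*
   Diagnostics: three velocity-space moments of the particle weights w_p are accumulated in go()
   for one target vertex/grid as a conservation check; in 2.5V each contribution carries the
   axisymmetric factor 2*pi*r (r = x[0]), matching the accumulation loops below:

     n  ~ sum_p 2*pi*r_p w_p
     pz ~ sum_p 2*pi*r_p w_p z_p
     E  ~ sum_p 2*pi*r_p w_p |v_p|^2

   (each scaled by the n_0, v_0, and mass factors used in the code)
*/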

#include "petscdmplex.h"
#include "petscds.h"
#include "petscdmswarm.h"
#include "petscksp.h"
#include <petsc/private/petscimpl.h>
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  #include <omp.h>
#endif
#include <petsclandau.h>
#include <petscdmcomposite.h>

typedef struct {
  Mat MpTrans;
  Mat Mp;
  Vec ff;
  Vec uu;
} MatShellCtx;

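/* Shell matrix actions for the normal-equations operator MtM = Mp^T Mp, used in gridToParticles()
   to build a pseudo-inverse of the particle mass matrix Mp */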
PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
  PetscFunctionReturn(PETSC_SUCCESS);
}

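/* Create a DMSWARM_PIC particle swarm attached to the cell DM, with a single scalar field "w_q"
   holding the particle weights */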
PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
{
  PetscInt Nc = 1;

  PetscFunctionBeginUser;
  PetscCall(DMCreate(PETSC_COMM_SELF, sw));
  PetscCall(DMSetType(*sw, DMSWARM));
  PetscCall(DMSetDimension(*sw, dim));
  PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
  PetscCall(DMSwarmSetCellDM(*sw, dm));
  PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
  PetscCall(DMSwarmFinalizeFieldRegister(*sw));
  PetscCall(DMSetFromOptions(*sw));
  PetscFunctionReturn(PETSC_SUCCESS);
}

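/* Map a grid (FE) solution back to particle weights: apply the FE mass matrix to rhs, then solve
   with the particle mass matrix Mp. If the ftop_ KSP is not LSQR and there are at least as many
   particles as grid dofs, a shell normal-equations operator Mp^T Mp with a diagonal preconditioner
   is used; otherwise an LSQR (transpose) solve is done on Mp directly. */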
PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass)
{
  PetscBool    is_lsqr;
  KSP          ksp;
  Mat          PM_p = NULL, MtM, D;
  Vec          ff;
  PetscInt     N, M, nzl;
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatMult(Mass, rhs, work));
  PetscCall(VecCopy(work, rhs));
  // pseudo-inverse
  PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
  PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
  PetscCall(KSPSetFromOptions(ksp));
  PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
  if (!is_lsqr) {
    PetscCall(MatGetLocalSize(M_p, &M, &N));
    if (N > M) {
      PC pc;
      PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT ") -- revert to lsqr\n", M, N));
      is_lsqr = PETSC_TRUE;
      PetscCall(KSPSetType(ksp, KSPLSQR));
      PetscCall(KSPGetPC(ksp, &pc));
      PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
    } else {
      PetscCall(PetscNew(&matshellctx));
      PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
      PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
      matshellctx->Mp = M_p;
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
      PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
      PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
      PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view"));
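      /* Build the diagonal D of Mp^T Mp (sum of squares of each row of Mp^T) to use as the
         preconditioning matrix for the shell normal-equations operator */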
      for (int i = 0; i < N; i++) {
        const PetscScalar *vals;
        const PetscInt    *cols;
        PetscScalar        dot = 0;
        PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
        for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
        PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
        PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
      }
      PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
      PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
      PetscCall(KSPSetOperators(ksp, MtM, D));
      PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
      PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
      PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
      PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
    }
  }
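  /* LSQR path: solve the least-squares system with Mp directly; if a block-Jacobi PC is requested,
     build a square particle mass matrix to precondition with */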
  if (is_lsqr) {
    PC        pc;
    PetscBool is_bjac;
    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
    if (is_bjac) {
      PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
      PetscCall(KSPSetOperators(ksp, M_p, PM_p));
    } else {
      PetscCall(KSPSetOperators(ksp, M_p, M_p));
    }
  }
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
  if (!is_lsqr) {
    PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
    PetscCall(MatMult(M_p, matshellctx->uu, ff));
    PetscCall(MatDestroy(&matshellctx->MpTrans));
    PetscCall(VecDestroy(&matshellctx->ff));
    PetscCall(VecDestroy(&matshellctx->uu));
    PetscCall(MatDestroy(&D));
    PetscCall(MatDestroy(&MtM));
    PetscCall(PetscFree(matshellctx));
  } else {
    PetscCall(KSPSolveTranspose(ksp, rhs, ff));
  }
  PetscCall(KSPDestroy(&ksp));
  /* Visualize particle field */
  PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
  PetscCall(MatDestroy(&PM_p));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  PetscFunctionReturn(PETSC_SUCCESS);
}

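/* Deposit particles onto the grid: set particle coordinates and weights, migrate them into cells,
   build the particle-to-grid mass matrix M_p, and form the FE right-hand side rho = M_p^T f_w */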
PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
{
  PetscBool     removePoints = PETSC_TRUE;
  PetscReal    *wq, *coords;
  PetscDataType dtype;
  Mat           M_p;
  Vec           ff;
  PetscInt      bs, p, zero = 0;

  PetscFunctionBeginUser;
  PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
  PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  for (p = 0; p < Np; p++) {
    coords[p * dim + 0] = xx[p];
    coords[p * dim + 1] = yy[p];
    wq[p]               = a_wp[p];
    if (dim == 3) coords[p * dim + 2] = zz[p];
  }
  PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmMigrate(sw, removePoints));
  PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));

  /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
  PetscCall(DMCreateMassMatrix(sw, dm, &M_p));

  PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
  PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
  PetscCall(MatMultTranspose(M_p, ff, rho));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  // output
  *Mp_out = M_p;

  PetscFunctionReturn(PETSC_SUCCESS);
}
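/* Evaluate a normalized Maxwellian n (pi*theta)^{-3/2} exp(-|v|^2/theta), with theta = 2 kT/m */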
static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
{
  PetscInt  i;
  PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */

  /* compute the exponents, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian */
  u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
}

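/* go: drive the batched particle-in-cell Landau workflow. For each block of user spatial vertices
   (ctx->batch_sz at a time, with OpenMP over threads within a block): create synthetic particles,
   deposit them onto each species grid (particlesToGrid plus a mass-matrix solve), advance the
   Landau collision operator with TS, then map the grid solution back to particle weights
   (gridToParticles). Moments are accumulated for one target vertex/grid as a conservation check.
   MAX_NUM_THRDS bounds the per-thread work arrays below. */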
#define MAX_NUM_THRDS 12
PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target)
{
  DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
  Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
  KSP            t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
  Vec            t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
  PetscInt       nDMs, glb_b_id, nTargetP = 0;
  PetscErrorCode ierr = 0; // used inside OpenMP thread loops
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  PetscInt numthreads = PetscNumOMPThreads;
#else
  PetscInt numthreads = 1;
#endif
  LandauCtx *ctx;
  Vec       *globXArray;
  PetscReal  moments_0[3], moments_1[3], dt_init;

  PetscFunctionBeginUser;
  PetscCheck(numthreads <= MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
  PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of threads %" PetscInt_FMT " must be positive", numthreads);
  PetscCall(TSGetDM(ts, &pack));
  PetscCall(DMGetApplicationContext(pack, &ctx));
  PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT " must be divisible by the number of threads %" PetscInt_FMT, ctx->batch_sz, numthreads);
  PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
  PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " grids, Landau batch size %" PetscInt_FMT ", and %" PetscInt_FMT " global user vertices\n", ctx->num_grids, ctx->batch_sz, NUserV));
  PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
  PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
  PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
  PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
  // create mass matrices
  PetscCall(VecZeroEntries(X));
  PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
  for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // add same particles for all grids
    Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
    DM           dm   = ctx->plex[grid];
    PetscSection s;
    grid_dm[grid] = dm;
    PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
    //
    PetscCall(DMGetLocalSection(dm, &s));
    PetscCall(DMPlexCreateClosureIndex(dm, s));
    for (int tid = 0; tid < numthreads; tid++) {
      PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
      PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
      PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
      PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
      PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
    }
  }
  PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
  // Create particle raw data. Could use OpenMP with a thread-safe malloc, but this is just the synthetic user setup
  for (int i = 0; i < 3; i++) moments_0[i] = moments_1[i] = 0;
  PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
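  // Outer loop over user spatial vertices in chunks of the Landau batch size; each chunk is reset
  // to t = 0 and advanced independently through the collision operator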
  for (PetscInt global_batch_id = 0; global_batch_id < NUserV; global_batch_id += ctx->batch_sz) {
    PetscCall(TSSetTime(ts, 0));
    PetscCall(TSSetStepNumber(ts, 0));
    PetscCall(TSSetTimeStep(ts, dt_init));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], "rho"));
    // create fake particles
    for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
      PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      PetscInt   Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      // make particles
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {        // the ragged edge of the last batch
          PetscInt Npp0 = a_Np + (glb_b_id % a_Np), NN;            // fake user: number of particles in each dimension, with some load imbalance added (< 2x difference)
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
            const PetscReal kT_m = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */
            PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
            PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
            if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
            else Npi = Npj = Npk = Npp0;
            // User: use glb_b_id to index into your data
            NN = Npi * Npj * Npk; // make a regular grid of particles Npi x Npj x Npk
            if (glb_b_id == b_target) {
              nTargetP = NN;
              PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particles\n", glb_b_id, NN));
            }
            Np_t[grid][tid] = NN;
            PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
            hp[0] = (hi[0] - lo[0]) / Npi;
            hp[1] = (hi[1] - lo[1]) / Npj;
            hp[2] = (hi[2] - lo[2]) / Npk;
            if (dim == 2) hp[2] = 1;
            PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
            vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
            PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_b_id, grid, NN, b_target));
            for (int pj = 0, pp = 0; pj < Npj; pj++) {
              for (int pk = 0; pk < Npk; pk++) {
                for (int pi = 0; pi < Npi; pi++, pp++) {
                  xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
                  yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
                  if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
                  {
                    PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
                    maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
                    //PetscCall(PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
                    if (glb_b_id == b_target) {
                      PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1;
                      for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
                      moments_0[0] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
                      moments_0[1] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
                      moments_0[2] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
                    }
                  }
                }
              }
            }
          } // grid
        }   // active
      }     // fake threads
      /* Create particle swarm */
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
          //PetscCall(PetscInfo(pack,"Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n",glb_b_id));
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
            PetscErrorCode ierr_t;
            PetscSection   section;
            PetscInt       Nf;
            DM             dm = grid_dm[grid];
            ierr_t            = DMGetLocalSection(dm, &section);
            ierr_t            = PetscSectionGetNumFields(section, &Nf);
            if (Nf != 1) ierr_t = 9999;
            else {
              ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
              ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", b_id, grid, LAND_PACK_IDX(b_id, grid));
              ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
            }
            if (ierr_t) ierr = ierr_t;
          }
        } // active
      }
      PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only one species per grid is supported");
      PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
      // p --> g: make globMpArray & set X
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
            PetscErrorCode ierr_t;
            DM             dm   = grid_dm[grid];
            DM             sw   = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
            Vec            subX = globXArray[LAND_PACK_IDX(b_id, grid)], work = t_fhat[grid][tid];
            ierr_t = PetscInfo(pack, "particlesToGrid: global batch %" PetscInt_FMT ", grid %" PetscInt_FMT ", local batch index %" PetscInt_FMT "\n", global_batch_id, grid, LAND_PACK_IDX(b_id, grid));
            ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id, grid)]);
            if (ierr_t) ierr = ierr_t;
            // u = M^-1 f_w
            ierr_t = VecCopy(subX, work);
            ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
            if (ierr_t) ierr = ierr_t;
          }
        }
      }
      PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
      /* Cleanup */
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          PetscCall(PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_b_id + 1, NUserV));
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
            PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
          }
        } // active
      }
    } // batches
    if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
    PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
    PetscCall(DMPlexLandauPrintNorms(X, 0));
    // advance
    PetscCall(TSSetSolution(ts, X));
    PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_batch_id, global_batch_id + ctx->batch_sz));
    PetscCall(TSSolve(ts, X));
    PetscCall(DMPlexLandauPrintNorms(X, 1));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    // map back to particles
    for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
      PetscCall(PetscInfo(pack, "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_batch_id + 1, NUserV, b_id_0 + 1, ctx->batch_sz));
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
            PetscErrorCode ierr_t;
            ierr_t = PetscInfo(pack, "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_batch_id, b_id, grid, LAND_PACK_IDX(b_id, grid));
            ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id, grid)], globXArray[LAND_PACK_IDX(b_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id, grid)], g_Mass[grid]);
            if (ierr_t) ierr = ierr_t;
          }
        }
      }
      PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
      /* Cleanup, and get data */
      PetscCall(PetscInfo(pack, "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", b_id_0, b_id_0 + numthreads));
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
            PetscDataType dtype;
            PetscReal    *wp, *coords;
            DM            sw = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
            PetscInt      npoints, bs = 1;
            PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
            if (glb_b_id == b_target) {
              PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
              PetscCall(DMSwarmGetLocalSize(sw, &npoints));
              for (int p = 0; p < npoints; p++) {
                PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1;
                for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
                moments_1[0] += fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
                moments_1[1] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * coords[p * dim + 1]; // z-momentum
                moments_1[2] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
              }
              PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
            }
            PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
            PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id, grid)]));
            PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(b_id, grid)]));
          }
        }
      }
    } // thread batch
    PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
  } // user batch
  /* Cleanup */
  PetscCall(PetscFree(globXArray));
  PetscCall(PetscFree(globSwarmArray));
  PetscCall(PetscFree(globMpArray));
  // clean up mass matrices
  for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
    PetscCall(MatDestroy(&g_Mass[grid]));
    for (int tid = 0; tid < numthreads; tid++) {
      PetscCall(VecDestroy(&t_fhat[grid][tid]));
      PetscCall(KSPDestroy(&t_ksp[grid][tid]));
    }
  }
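  // Report conservation of the initial (moments_0) vs. final (moments_1) particle moments:
  // number density, z-momentum, and energy for the target vertex/grid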
  {
    PetscReal relerr = PetscAbsReal((moments_1[2] - moments_0[2]) / moments_0[2]), logEerr = PetscLog10Real(relerr);
    PetscCall(PetscInfo(X, "Total number density: %20.12e (%20.12e); z-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e) rerr = %e (log10(rerr) = %e, %" PetscInt_FMT "), %" PetscInt_FMT " particles. Use %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)moments_0[2], (double)relerr, (double)logEerr, -((-(PetscInt)logEerr + 1) / 2) * 2, nTargetP, numthreads));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

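/* main: build the Landau velocity-space DM (a composite DM packing the batched per-grid systems),
   set up an implicit TS with the Landau IFunction/IJacobian, and run the particle workflow in go().
   See the TEST section at the bottom of this file for full example command lines. */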
int main(int argc, char **argv)
{
  DM         pack;
  Vec        X;
  PetscInt   dim = 2, nvert = 1, Np = 10, btarget = 0, gtarget = 0;
  TS         ts;
  Mat        J;
  LandauCtx *ctx;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  // process args
  PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
  PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL));
  PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
  PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
  PetscCall(PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL));
  PetscCheck(btarget < nvert, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, btarget, nvert);
  PetscCall(PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL));
  PetscOptionsEnd();
  /* Create a mesh */
  PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
  PetscCall(DMSetUp(pack));
  PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
  PetscCall(DMGetApplicationContext(pack, &ctx));
  PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, gtarget, ctx->num_grids);
  PetscCheck(nvert >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " must be >= batch size %" PetscInt_FMT, nvert, ctx->batch_sz);
  /* Create timestepping solver context */
  PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
  PetscCall(TSSetDM(ts, pack));
  PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
  PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
  PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
  PetscCall(TSSetFromOptions(ts));
  PetscCall(PetscObjectSetName((PetscObject)X, "X"));
  // do particle advance
  PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
  PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
  /* clean up */
  PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
  PetscCall(TSDestroy(&ts));
  PetscCall(VecDestroy(&X));
  PetscCall(PetscFinalize());
  return 0;
}

/*TEST

  build:
    requires: !complex p4est

  testset:
    requires: double defined(PETSC_USE_DMLANDAU_2D)
    output_file: output/ex30_0.out
    filter: sed 's/vec:seqkokkos/vec:seq/g'
    args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
          -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
          -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
          -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
          -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
          -ksp_type preonly -pc_type lu -dm_landau_verbose 4 \
          -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
          -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
          -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

    test:
      suffix: cpu
      args: -dm_landau_device_type cpu
    test:
      suffix: kokkos
      requires: kokkos_kernels
      args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos

  testset:
    requires: double !defined(PETSC_USE_DMLANDAU_2D)
    output_file: output/ex30_3d.out
    filter: sed 's/vec:seqkokkos/vec:seq/g'
    args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
          -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
          -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
          -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
          -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
          -ksp_type preonly -pc_type lu \
          -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-6\
          -snes_converged_reason -snes_monitor -snes_rtol 1e-9 -snes_stol 1e-9\
          -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

    test:
      suffix: cpu_3d
      args: -dm_landau_device_type cpu
    test:
      suffix: kokkos_3d
      requires: kokkos_kernels
      args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos

TEST*/