xref: /petsc/src/ts/tests/ex30.c (revision a69119a591a03a9d906b29c0a4e9802e4d7c9795)
1 static char help[] = "Grid based Landau collision operator with PIC interface with OpenMP setup. (one species per grid)\n";
2 
3 /*
4    Support 2.5V with axisymmetric coordinates
5      - r,z coordinates
6      - Domain and species data input by Landau operator
7      - "radius" for each grid, normalized with electron thermal velocity
8      - Domain: (0,radius) x (-radius,radius), thus first coordinate x[0] is perpendicular velocity and 2pi*x[0] term is added for axisymmetric
9    Supports full 3V
10 
11  */
12 
13 #include "petscdmplex.h"
14 #include "petscds.h"
15 #include "petscdmswarm.h"
16 #include "petscksp.h"
17 #include <petsc/private/petscimpl.h>
18 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
19 #include <omp.h>
20 #endif
21 #include <petsclandau.h>
22 #include <petscdmcomposite.h>
23 
// Context for a MATSHELL that applies the normal-equations operator
// M_p^T M_p without ever forming the product explicitly
// (see MatMultMtM_SeqAIJ / MatMultAddMtM_SeqAIJ below).
typedef struct {
  Mat MpTrans; // explicit transpose of Mp, built with MatTranspose()
  Mat Mp;      // particle mass matrix M_p (borrowed reference; set by gridToParticles)
  Vec ff;      // work vector compatible with the range of Mp (left vector of MatCreateVecs)
  Vec uu;      // work vector compatible with the domain of Mp (right vector of MatCreateVecs)
} MatShellCtx;
30 
31 PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy) {
32   MatShellCtx *matshellctx;
33 
34   PetscFunctionBeginUser;
35   PetscCall(MatShellGetContext(MtM, &matshellctx));
36   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
37   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
38   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
39   PetscFunctionReturn(0);
40 }
41 
42 PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz) {
43   MatShellCtx *matshellctx;
44 
45   PetscFunctionBeginUser;
46   PetscCall(MatShellGetContext(MtM, &matshellctx));
47   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
48   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
49   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
50   PetscFunctionReturn(0);
51 }
52 
// Create a DMSWARM_PIC particle swarm attached to the cell DM 'dm', with a
// single scalar field "w_q" (the particle weight) registered per particle.
// NOTE: the calls below follow the required DMSwarm setup order: type,
// dimension, swarm type, cell DM, field registration, finalize, options.
PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw) {
  PetscInt Nc = 1; // one weight component per particle (one species per grid)

  PetscFunctionBeginUser;
  PetscCall(DMCreate(PETSC_COMM_SELF, sw));
  PetscCall(DMSetType(*sw, DMSWARM));
  PetscCall(DMSetDimension(*sw, dim));
  PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
  PetscCall(DMSwarmSetCellDM(*sw, dm));
  PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
  PetscCall(DMSwarmFinalizeFieldRegister(*sw));
  PetscCall(DMSetFromOptions(*sw));
  PetscFunctionReturn(0);
}
67 
68 PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass) {
69   PetscBool    is_lsqr;
70   KSP          ksp;
71   Mat          PM_p = NULL, MtM, D;
72   Vec          ff;
73   PetscInt     N, M, nzl;
74   MatShellCtx *matshellctx;
75 
76   PetscFunctionBeginUser;
77   PetscCall(MatMult(Mass, rhs, work));
78   PetscCall(VecCopy(work, rhs));
79   // pseudo-inverse
80   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
81   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
82   PetscCall(KSPSetFromOptions(ksp));
83   PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
84   if (!is_lsqr) {
85     PetscCall(MatGetLocalSize(M_p, &M, &N));
86     if (N > M) {
87       PC pc;
88       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < M (%" PetscInt_FMT ") -- skip revert to lsqr\n", M, N));
89       is_lsqr = PETSC_TRUE;
90       PetscCall(KSPSetType(ksp, KSPLSQR));
91       PetscCall(KSPGetPC(ksp, &pc));
92       PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
93     } else {
94       PetscCall(PetscNew(&matshellctx));
95       PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
96       PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
97       matshellctx->Mp = M_p;
98       PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
99       PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
100       PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
101       PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
102       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view"));
103       for (int i = 0; i < N; i++) {
104         const PetscScalar *vals;
105         const PetscInt    *cols;
106         PetscScalar        dot = 0;
107         PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
108         for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
109         PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
110         PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
111       }
112       PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
113       PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
114       PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
115       PetscCall(KSPSetOperators(ksp, MtM, D));
116       PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
117       PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
118       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
119     }
120   }
121   if (is_lsqr) {
122     PC        pc;
123     PetscBool is_bjac;
124     PetscCall(KSPGetPC(ksp, &pc));
125     PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
126     if (is_bjac) {
127       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
128       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
129     } else {
130       PetscCall(KSPSetOperators(ksp, M_p, M_p));
131     }
132   }
133   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
134   if (!is_lsqr) {
135     PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
136     PetscCall(MatMult(M_p, matshellctx->uu, ff));
137     PetscCall(MatDestroy(&matshellctx->MpTrans));
138     PetscCall(VecDestroy(&matshellctx->ff));
139     PetscCall(VecDestroy(&matshellctx->uu));
140     PetscCall(MatDestroy(&D));
141     PetscCall(MatDestroy(&MtM));
142     PetscCall(PetscFree(matshellctx));
143   } else {
144     PetscCall(KSPSolveTranspose(ksp, rhs, ff));
145   }
146   PetscCall(KSPDestroy(&ksp));
147   /* Visualize particle field */
148   PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
149   PetscCall(MatDestroy(&PM_p));
150   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
151 
152   PetscFunctionReturn(0);
153 }
154 
155 PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out) {
156   PetscBool     removePoints = PETSC_TRUE;
157   PetscReal    *wq, *coords;
158   PetscDataType dtype;
159   Mat           M_p;
160   Vec           ff;
161   PetscInt      bs, p, zero = 0;
162 
163   PetscFunctionBeginUser;
164   PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
165   PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
166   PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
167   for (p = 0; p < Np; p++) {
168     coords[p * dim + 0] = xx[p];
169     coords[p * dim + 1] = yy[p];
170     wq[p]               = a_wp[p];
171     if (dim == 3) coords[p * dim + 2] = zz[p];
172   }
173   PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
174   PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
175   PetscCall(DMSwarmMigrate(sw, removePoints));
176   PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));
177 
178   /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
179   PetscCall(DMCreateMassMatrix(sw, dm, &M_p));
180 
181   PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
182   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
183   PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
184   PetscCall(MatMultTranspose(M_p, ff, rho));
185   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
186 
187   // output
188   *Mp_out = M_p;
189 
190   PetscFunctionReturn(0);
191 }
192 static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u) {
193   PetscInt  i;
194   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */
195 
196   /* compute the exponents, v^2 */
197   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
198   /* evaluate the Maxwellian */
199   u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
200 }
201 
202 #define MAX_NUM_THRDS 12
203 PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target) {
204   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
205   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
206   KSP            t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
207   Vec            t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
208   PetscInt       nDMs, glb_b_id, nTargetP = 0;
209   PetscErrorCode ierr = 0;
210 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
211   PetscInt numthreads = PetscNumOMPThreads;
212 #else
213   PetscInt numthreads = 1;
214 #endif
215   LandauCtx *ctx;
216   Vec       *globXArray;
217   PetscReal  moments_0[3], moments_1[3], dt_init;
218 
219   PetscFunctionBeginUser;
220   PetscCheck(numthreads <= MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
221   PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
222   PetscCall(TSGetDM(ts, &pack));
223   PetscCall(DMGetApplicationContext(pack, &ctx));
224   PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT "  mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
225   PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
226   PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices)\n", ctx->num_grids, ctx->batch_sz, NUserV));
227   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
228   PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
229   PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
230   PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
231   // create mass matrices
232   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
233   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // add same particels for all grids
234     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
235     DM           dm   = ctx->plex[grid];
236     PetscSection s;
237     grid_dm[grid] = dm;
238     PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
239     //
240     PetscCall(DMGetLocalSection(dm, &s));
241     PetscCall(DMPlexCreateClosureIndex(dm, s));
242     for (int tid = 0; tid < numthreads; tid++) {
243       PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
244       PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
245       PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
246       PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
247       PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
248     }
249   }
250   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
251   // create particle raw data. could use OMP with a thread safe malloc, but this is just the fake user
252   for (int i = 0; i < 3; i++) moments_0[i] = moments_1[i] = 0;
253   PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
254   for (PetscInt global_batch_id = 0; global_batch_id < NUserV; global_batch_id += ctx->batch_sz) {
255     ierr = TSSetTime(ts, 0);
256     CHKERRQ(ierr);
257     ierr = TSSetStepNumber(ts, 0);
258     CHKERRQ(ierr);
259     ierr = TSSetTimeStep(ts, dt_init);
260     CHKERRQ(ierr);
261     PetscCall(VecZeroEntries(X));
262     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
263     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], "rho"));
264     // create fake particles
265     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
266       PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
267       PetscInt   Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
268       // make particles
269       for (int tid = 0; tid < numthreads; tid++) {
270         const PetscInt b_id = b_id_0 + tid;
271         if ((glb_b_id = global_batch_id + b_id) < NUserV) {        // the ragged edge of the last batch
272           PetscInt Npp0 = a_Np + (glb_b_id % a_Np), NN;            // fake user: number of particels in each dimension with add some load imbalance and diff (<2x)
273           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
274             const PetscReal kT_m = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */
275             ;
276             PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
277             PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
278             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
279             else Npi = Npj = Npk = Npp0;
280             // User: use glb_b_id to index into your data
281             NN = Npi * Npj * Npk; // make a regular grid of particles Npp x Npp
282             if (glb_b_id == b_target) {
283               nTargetP = NN;
284               PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particels\n", glb_b_id, NN));
285             }
286             Np_t[grid][tid] = NN;
287             PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
288             hp[0] = (hi[0] - lo[0]) / Npi;
289             hp[1] = (hi[1] - lo[1]) / Npj;
290             hp[2] = (hi[2] - lo[2]) / Npk;
291             if (dim == 2) hp[2] = 1;
292             PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
293             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
294             PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_b_id, grid, NN, b_target));
295             for (int pj = 0, pp = 0; pj < Npj; pj++) {
296               for (int pk = 0; pk < Npk; pk++) {
297                 for (int pi = 0; pi < Npi; pi++, pp++) {
298                   xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
299                   yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
300                   if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
301                   {
302                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
303                     maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
304                     //PetscCall(PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
305                     if (glb_b_id == b_target) {
306                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1;
307                       for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
308                       moments_0[0] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
309                       moments_0[1] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
310                       moments_0[2] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
311                     }
312                   }
313                 }
314               }
315             }
316           } // grid
317         }   // active
318       }     // fake threads
319       /* Create particle swarm */
320       PetscPragmaOMP(parallel for)
321         for (int tid=0; tid<numthreads; tid++) {
322         const PetscInt b_id = b_id_0 + tid;
323         if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
324           //PetscCall(PetscInfo(pack,"Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n",glb_b_id));
325           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
326             PetscErrorCode ierr_t;
327             PetscSection   section;
328             PetscInt       Nf;
329             DM             dm = grid_dm[grid];
330             ierr_t            = DMGetLocalSection(dm, &section);
331             ierr_t            = PetscSectionGetNumFields(section, &Nf);
332             if (Nf != 1) ierr_t = 9999;
333             else {
334               ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
335               ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", b_id, grid, LAND_PACK_IDX(b_id, grid));
336               ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
337             }
338             if (ierr_t) ierr = ierr_t;
339           }
340         } // active
341       }
342       PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
343       PetscCall(ierr);
344       // p --> g: make globMpArray & set X
345       PetscPragmaOMP(parallel for)
346         for (int tid=0; tid<numthreads; tid++) {
347         const PetscInt b_id = b_id_0 + tid;
348         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
349           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
350             PetscErrorCode ierr_t;
351             DM             dm   = grid_dm[grid];
352             DM             sw   = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
353             Vec            subX = globXArray[LAND_PACK_IDX(b_id, grid)], work = t_fhat[grid][tid];
354             PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") particlesToGrid for local batch %" PetscInt_FMT "\n", global_batch_id, grid, LAND_PACK_IDX(b_id, grid));
355             ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id, grid)]);
356             if (ierr_t) ierr = ierr_t;
357             // u = M^_1 f_w
358             ierr_t = VecCopy(subX, work);
359             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
360             if (ierr_t) ierr = ierr_t;
361           }
362         }
363       }
364       PetscCall(ierr);
365       /* Cleanup */
366       for (int tid = 0; tid < numthreads; tid++) {
367         const PetscInt b_id = b_id_0 + tid;
368         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
369           PetscCall(PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_b_id + 1, NUserV));
370           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
371             PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
372           }
373         } // active
374       }
375     } // Landau
376     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
377     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
378     PetscCall(DMPlexLandauPrintNorms(X, 0));
379     // advance
380     PetscCall(TSSetSolution(ts, X));
381     PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_batch_id, global_batch_id + ctx->batch_sz));
382     PetscCall(TSSolve(ts, X));
383     PetscCall(DMPlexLandauPrintNorms(X, 1));
384     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
385     // map back to particles
386     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
387       PetscCall(PetscInfo(pack, "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_batch_id + 1, NUserV, b_id_0 + 1, ctx->batch_sz));
388       PetscPragmaOMP(parallel for)
389         for (int tid=0; tid<numthreads; tid++) {
390         const PetscInt b_id = b_id_0 + tid;
391         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
392           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
393             PetscErrorCode ierr_t;
394             PetscInfo(pack, "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_batch_id, b_id, grid, LAND_PACK_IDX(b_id, grid));
395             ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id, grid)], globXArray[LAND_PACK_IDX(b_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id, grid)], g_Mass[grid]);
396             if (ierr_t) ierr = ierr_t;
397           }
398         }
399       }
400       PetscCall(ierr);
401       /* Cleanup, and get data */
402       PetscCall(PetscInfo(pack, "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", b_id_0, b_id_0 + numthreads));
403       for (int tid = 0; tid < numthreads; tid++) {
404         const PetscInt b_id = b_id_0 + tid;
405         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
406           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
407             PetscDataType dtype;
408             PetscReal    *wp, *coords;
409             DM            sw = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
410             PetscInt      npoints, bs = 1;
411             PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
412             if (glb_b_id == b_target) {
413               PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
414               PetscCall(DMSwarmGetLocalSize(sw, &npoints));
415               for (int p = 0; p < npoints; p++) {
416                 PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1;
417                 for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
418                 moments_1[0] += fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
419                 moments_1[1] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * coords[p * dim + 1]; // z-momentum
420                 moments_1[2] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
421               }
422               PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
423             }
424             PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
425             PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id, grid)]));
426             PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(b_id, grid)]));
427           }
428         }
429       }
430     } // thread batch
431     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
432   } // user batch
433   /* Cleanup */
434   PetscCall(PetscFree(globXArray));
435   PetscCall(PetscFree(globSwarmArray));
436   PetscCall(PetscFree(globMpArray));
437   // clean up mass matrices
438   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
439     PetscCall(MatDestroy(&g_Mass[grid]));
440     for (int tid = 0; tid < numthreads; tid++) {
441       PetscCall(VecDestroy(&t_fhat[grid][tid]));
442       PetscCall(KSPDestroy(&t_ksp[grid][tid]));
443     }
444   }
445   PetscCall(PetscInfo(X, "Total number density: %20.12e (%20.12e); x-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e) error = %e (log10 of error = %" PetscInt_FMT "), %" PetscInt_FMT " particles. Use %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)moments_0[2], (double)((moments_1[2] - moments_0[2]) / moments_0[2]), (PetscInt)PetscLog10Real(PetscAbsReal((moments_1[2] - moments_0[2]) / moments_0[2])), nTargetP, numthreads));
446   PetscFunctionReturn(0);
447 }
448 
449 int main(int argc, char **argv) {
450   DM         pack;
451   Vec        X;
452   PetscInt   dim = 2, nvert = 1, Np = 10, btarget = 0, gtarget = 0;
453   TS         ts;
454   Mat        J;
455   LandauCtx *ctx;
456 #if defined(PETSC_USE_LOG)
457   PetscLogStage stage;
458 #endif
459 
460   PetscFunctionBeginUser;
461   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
462   // process args
463   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
464   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL));
465   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
466   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
467   PetscCall(PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL));
468   PetscCheck(btarget < nvert, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, btarget, nvert);
469   PetscCall(PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL));
470   PetscOptionsEnd();
471   /* Create a mesh */
472   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
473   PetscCall(DMSetUp(pack));
474   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
475   PetscCall(DMGetApplicationContext(pack, &ctx));
476   PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, gtarget, ctx->num_grids);
477   PetscCheck(nvert >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " should be <= batch size %" PetscInt_FMT, nvert, ctx->batch_sz);
478   /* Create timestepping solver context */
479   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
480   PetscCall(TSSetDM(ts, pack));
481   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
482   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
483   PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
484   PetscCall(TSSetFromOptions(ts));
485   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
486   // do particle advance, warmup
487   PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
488   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
489   // hot
490   PetscCall(PetscLogStageRegister("ex30 hot solve", &stage));
491   PetscCall(PetscLogStagePush(stage));
492   PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
493   PetscCall(PetscLogStagePop());
494   /* clean up */
495   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
496   PetscCall(TSDestroy(&ts));
497   PetscCall(VecDestroy(&X));
498   PetscCall(PetscFinalize());
499   return 0;
500 }
501 
502 /*TEST
503 
504   build:
505     requires: !complex p4est
506 
507   testset:
508     requires: double defined(PETSC_USE_DMLANDAU_2D)
509     output_file: output/ex30_0.out
510     args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
511           -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
512           -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
513           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
514           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
515           -ksp_type preonly -pc_type lu \
516           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
517           -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
518           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec
519 
520     test:
521       suffix: cpu
522       args: -dm_landau_device_type cpu
523     test:
524       suffix: kokkos
525       requires: kokkos_kernels
526       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
527     test:
528       suffix: cuda
529       requires: cuda
530       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda
531 
532   testset:
533     requires: double !defined(PETSC_USE_DMLANDAU_2D)
534     output_file: output/ex30_3d.out
535     args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
536           -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
537           -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
538           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
539           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
540           -ksp_type preonly -pc_type lu \
541           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
542           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12\
543           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec
544 
545     test:
546       suffix: cpu_3d
547       args: -dm_landau_device_type cpu
548     test:
549       suffix: kokkos_3d
550       requires: kokkos_kernels
551       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
552     test:
553       suffix: cuda_3d
554       requires: cuda
555       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda
556 
557 TEST*/
558