xref: /petsc/src/ts/tests/ex30.c (revision 976ed0a4a7e2c9a504a034b07aaf489e2c2d55c5)
1 static char help[] = "Grid based Landau collision operator with PIC interface with OpenMP setup. (one species per grid)\n";
2 
3 /*
4    Support 2.5V with axisymmetric coordinates
5      - r,z coordinates
6      - Domain and species data input by Landau operator
7      - "radius" for each grid, normalized with electron thermal velocity
8      - Domain: (0,radius) x (-radius,radius), thus first coordinate x[0] is perpendicular velocity and 2pi*x[0] term is added for axisymmetric
9    Supports full 3V
10 
11  */
12 
13 #include <petscdmplex.h>
14 #include <petscds.h>
15 #include <petscdmswarm.h>
16 #include <petscksp.h>
17 #include <petsc/private/petscimpl.h>
18 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
19   #include <omp.h>
20 #endif
21 #include <petsclandau.h>
22 #include <petscdmcomposite.h>
23 
/* Context for the shell matrix MtM = Mp^T * Mp used by the pseudo-inverse solve */
typedef struct {
  Mat MpTrans; // explicit transpose of the particle mass matrix Mp
  Mat Mp;      // particle-to-grid mass matrix (not owned; borrowed from caller)
  Vec ff;      // work vector in the particle space (range of Mp)
  Vec uu;      // work vector in the grid space (domain of Mp)
} MatShellCtx;

/* Context handed to the TS via TSSetApplicationContext; used by PostStep diagnostics.
   All pointer members alias arrays owned by go() -- nothing here is owned. */
typedef struct {
  PetscInt   v_target;       // global vertex id to print diagnostics for
  DM        *globSwarmArray; // particle swarms, one per (batch, grid) pair
  LandauCtx *ctx;            // Landau operator context (grids, species, scales)
  PetscInt  *nTargetP;       // number of particles per grid at the target vertex
  PetscReal  N_inv;          // 1 / total weight of the target vertex (for entropy)
  DM        *grid_dm;        // velocity-space mesh per grid
  Mat       *g_Mass;         // FE mass matrix per grid
  Mat       *globMpArray;    // particle mass matrices, one per (batch, grid) pair
  Vec       *globXArray;     // sub-vectors of the packed solution
  PetscBool  print;          // whether the current batch contains the target vertex
} PrintCtx;
43 
44 PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
45 {
46   MatShellCtx *matshellctx;
47 
48   PetscFunctionBeginUser;
49   PetscCall(MatShellGetContext(MtM, &matshellctx));
50   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
51   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
52   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
53   PetscFunctionReturn(PETSC_SUCCESS);
54 }
55 
56 PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
57 {
58   MatShellCtx *matshellctx;
59 
60   PetscFunctionBeginUser;
61   PetscCall(MatShellGetContext(MtM, &matshellctx));
62   PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
63   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
64   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
65   PetscFunctionReturn(PETSC_SUCCESS);
66 }
67 
68 PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
69 {
70   PetscInt Nc = 1;
71 
72   PetscFunctionBeginUser;
73   PetscCall(DMCreate(PETSC_COMM_SELF, sw));
74   PetscCall(DMSetType(*sw, DMSWARM));
75   PetscCall(DMSetDimension(*sw, dim));
76   PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
77   PetscCall(DMSwarmSetCellDM(*sw, dm));
78   PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_REAL));
79   PetscCall(DMSwarmFinalizeFieldRegister(*sw));
80   PetscCall(DMSetFromOptions(*sw));
81   PetscCall(PetscObjectSetName((PetscObject)*sw, "Particle Grid"));
82   PetscFunctionReturn(PETSC_SUCCESS);
83 }
84 
85 static PetscErrorCode makeSwarm(DM sw, const PetscInt dim, const PetscInt Np, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[])
86 {
87   PetscReal    *coords;
88   PetscDataType dtype;
89   PetscInt      bs, p, zero = 0;
90   PetscFunctionBeginUser;
91 
92   PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
93   PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
94   for (p = 0; p < Np; p++) {
95     coords[p * dim + 0] = xx[p];
96     coords[p * dim + 1] = yy[p];
97     if (dim == 3) coords[p * dim + 2] = zz[p];
98   }
99   PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
100   PetscFunctionReturn(PETSC_SUCCESS);
101 }
102 
103 static PetscErrorCode createMp(const DM dm, DM sw, Mat *Mp_out)
104 {
105   PetscBool removePoints = PETSC_TRUE;
106   Mat       M_p;
107   PetscFunctionBeginUser;
108   // migrate after coords are set
109   PetscCall(DMSwarmMigrate(sw, removePoints));
110   PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));
111   /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
112   PetscCall(DMCreateMassMatrix(sw, dm, &M_p));
113   PetscCall(DMViewFromOptions(sw, NULL, "-ex30_sw_view"));
114   // output
115   *Mp_out = M_p;
116   PetscFunctionReturn(PETSC_SUCCESS);
117 }
118 
119 static PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal a_wp[], Vec rho, Mat M_p)
120 {
121   PetscReal    *wq;
122   PetscDataType dtype;
123   Vec           ff;
124   PetscInt      bs, p;
125 
126   PetscFunctionBeginUser;
127   PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
128   for (p = 0; p < Np; p++) wq[p] = a_wp[p];
129   PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
130   PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
131   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
132   PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
133   PetscCall(MatMultTranspose(M_p, ff, rho));
134   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
135   PetscFunctionReturn(PETSC_SUCCESS);
136 }
137 
138 //
139 // add grid to arg 'sw.w_q'
140 //
141 PetscErrorCode gridToParticles(const DM dm, DM sw, const Vec rhs, Vec work, Mat M_p, Mat Mass)
142 {
143   PetscBool    is_lsqr;
144   KSP          ksp;
145   Mat          PM_p = NULL, MtM, D;
146   Vec          ff;
147   PetscInt     N, M, nzl;
148   MatShellCtx *matshellctx;
149 
150   PetscFunctionBeginUser;
151   PetscCall(MatMult(Mass, rhs, work));
152   // pseudo-inverse
153   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
154   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
155   PetscCall(KSPSetFromOptions(ksp));
156   PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
157   if (!is_lsqr) {
158     PetscCall(MatGetLocalSize(M_p, &M, &N));
159     if (N > M) {
160       PC pc;
161       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < M (%" PetscInt_FMT ") -- skip revert to lsqr\n", M, N));
162       is_lsqr = PETSC_TRUE;
163       PetscCall(KSPSetType(ksp, KSPLSQR));
164       PetscCall(KSPGetPC(ksp, &pc));
165       PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
166     } else {
167       PetscCall(PetscNew(&matshellctx));
168       PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
169       PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
170       matshellctx->Mp = M_p;
171       PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
172       PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
173       PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
174       PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
175       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view"));
176       for (int i = 0; i < N; i++) {
177         const PetscScalar *vals;
178         const PetscInt    *cols;
179         PetscScalar        dot = 0;
180         PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
181         for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
182         PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
183         PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
184       }
185       PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
186       PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
187       PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
188       PetscCall(KSPSetOperators(ksp, MtM, D));
189       PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
190       PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
191       PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
192       PetscCall(MatViewFromOptions(MtM, NULL, "-ftop2_MtM_mat_view"));
193     }
194   }
195   if (is_lsqr) {
196     PC        pc;
197     PetscBool is_bjac;
198     PetscCall(KSPGetPC(ksp, &pc));
199     PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
200     if (is_bjac) {
201       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
202       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
203     } else {
204       PetscCall(KSPSetOperators(ksp, M_p, M_p));
205     }
206   }
207   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
208   if (!is_lsqr) {
209     PetscCall(KSPSolve(ksp, work, matshellctx->uu));
210     PetscCall(MatMult(M_p, matshellctx->uu, ff));
211     PetscCall(MatDestroy(&matshellctx->MpTrans));
212     PetscCall(VecDestroy(&matshellctx->ff));
213     PetscCall(VecDestroy(&matshellctx->uu));
214     PetscCall(MatDestroy(&D));
215     PetscCall(MatDestroy(&MtM));
216     PetscCall(PetscFree(matshellctx));
217   } else {
218     PetscCall(KSPSolveTranspose(ksp, work, ff));
219   }
220   PetscCall(KSPDestroy(&ksp));
221   /* Visualize particle field */
222   PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
223   PetscCall(MatDestroy(&PM_p));
224   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
225 
226   PetscFunctionReturn(PETSC_SUCCESS);
227 }
228 
229 #define EX30_MAX_NUM_THRDS 12
230 #define EX30_MAX_BATCH_SZ  1024
231 //
232 // add grid to arg 'globSwarmArray[].w_q'
233 //
/*
  Map the grid solution back to particle weights for every (batch, grid) pair in
  this chunk of vertices, then accumulate diagnostic moments for the target
  vertex v_target.

  The outer loop walks the Landau batch in stripes of 'numthreads'; the inner
  tid loop is written so it could be an OMP parallel-for (pragma currently
  commented out), which is why errors are aggregated into 'ierr' instead of
  using PetscCall directly inside it.

  moments[0..3] accumulate: total weight (density), z-momentum, energy (v^2),
  and entropy (-sum ww*log(ww)); only particles of the target vertex contribute.
*/
PetscErrorCode gridToParticles_private(DM grid_dm[], DM globSwarmArray[], const PetscInt dim, const PetscInt v_target, const PetscInt numthreads, const PetscInt num_vertices, const PetscInt global_vertex_id, Mat globMpArray[], Mat g_Mass[], Vec t_fhat[][EX30_MAX_NUM_THRDS], PetscReal moments[], Vec globXArray[], LandauCtx *ctx)
{
  PetscErrorCode ierr = (PetscErrorCode)0; // used for inside thread loops

  PetscFunctionBeginUser;
  // map back to particles
  for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
    PetscCall(PetscInfo(grid_dm[0], "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_vertex_id + 1, num_vertices, v_id_0 + 1, ctx->batch_sz));
    //PetscPragmaOMP(parallel for)
    for (int tid = 0; tid < numthreads; tid++) {
      const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
      if (glb_v_id < num_vertices) { // ragged edge of the last batch
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
          PetscErrorCode ierr_t;
          ierr_t = PetscInfo(grid_dm[0], "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_vertex_id, v_id, grid, LAND_PACK_IDX(v_id, grid));
          ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(v_id, grid)], globXArray[LAND_PACK_IDX(v_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]);
          if (ierr_t) ierr = ierr_t; // defer error check to outside the (potential) OMP loop
        }
      }
    }
    PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
    /* Get moments */
    PetscCall(PetscInfo(grid_dm[0], "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", v_id_0, v_id_0 + numthreads));
    for (int tid = 0; tid < numthreads; tid++) {
      const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id + v_id;
      if (glb_v_id == v_target) { // diagnostics only for the target vertex
        // first pass: density, momentum, energy (needed to get total weight for entropy)
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          PetscDataType dtype;
          PetscReal    *wp, *coords;
          DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
          PetscInt      npoints, bs = 1;
          PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
          PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
          PetscCall(DMSwarmGetLocalSize(sw, &npoints));
          for (int p = 0; p < npoints; p++) {
            // in 2D (r,z) axisymmetric coords, the volume element carries 2*pi*r
            PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
            for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
            moments[0] += w;
            moments[1] += w * ctx->v_0 * coords[p * dim + 1]; // z-momentum
            moments[2] += w * ctx->v_0 * ctx->v_0 * v2;
          }
          PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
          PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
        }
        // second pass: entropy, normalized by the total weight from the first pass
        const PetscReal N_inv = 1 / moments[0];
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          PetscDataType dtype;
          PetscReal    *wp, *coords;
          DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
          PetscInt      npoints, bs = 1;
          PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
          PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
          PetscCall(DMSwarmGetLocalSize(sw, &npoints));
          for (int p = 0; p < npoints; p++) {
            const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1, w = fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * N_inv;
            if (ww > PETSC_REAL_MIN) { // skip zero/negative weights: log would be invalid
              moments[3] -= ww * PetscLogReal(ww);
              PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
            }
          }
          PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
          PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
        }
      }
    } // thread batch
  }   // batch
  PetscFunctionReturn(PETSC_SUCCESS);
}
302 
303 static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscReal shift, PetscScalar *u)
304 {
305   PetscInt  i;
306   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */
307 
308   /* compute the exponents, v^2 */
309   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
310   /* evaluate the Maxwellian */
311   u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
312   if (shift != 0.) {
313     v2 = 0;
314     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
315     v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
316     /* evaluate the shifted Maxwellian */
317     u[0] += n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
318   }
319 }
320 
321 static PetscErrorCode PostStep(TS ts)
322 {
323   PetscInt   n, dim, nDMs;
324   PetscReal  t;
325   LandauCtx *ctx;
326   Vec        X;
327   PrintCtx  *printCtx;
328   PetscReal  moments[4];
329   DM         pack;
330 
331   PetscFunctionBeginUser;
332   PetscCall(TSGetApplicationContext(ts, &printCtx));
333   if (!printCtx->print) PetscFunctionReturn(PETSC_SUCCESS);
334 
335   for (int i = 0; i < 4; i++) moments[i] = 0;
336   ctx = printCtx->ctx;
337   PetscCall(TSGetDM(ts, &pack));
338   PetscCall(DMGetDimension(pack, &dim));
339   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
340   PetscCall(TSGetSolution(ts, &X));
341   const PetscInt v_id = printCtx->v_target % ctx->batch_sz;
342   PetscCall(TSGetSolution(ts, &X));
343   PetscCall(TSGetStepNumber(ts, &n));
344   PetscCall(TSGetTime(ts, &t));
345   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
346   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
347     PetscDataType dtype;
348     PetscReal    *wp, *coords;
349     DM            sw = printCtx->globSwarmArray[LAND_PACK_IDX(v_id, grid)];
350     Vec           work, subX = printCtx->globXArray[LAND_PACK_IDX(v_id, grid)];
351     PetscInt      bs, NN     = printCtx->nTargetP[grid];
352     // C-G moments
353     PetscCall(VecDuplicate(subX, &work));
354     PetscCall(gridToParticles(printCtx->grid_dm[grid], sw, subX, work, printCtx->globMpArray[LAND_PACK_IDX(v_id, grid)], printCtx->g_Mass[grid]));
355     PetscCall(VecDestroy(&work));
356     // moments
357     PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
358     PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // could get NN from sw - todo
359     for (int pp = 0; pp < NN; pp++) {
360       PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * printCtx->N_inv;
361       for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
362       moments[0] += w;
363       moments[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
364       moments[2] += w * ctx->v_0 * ctx->v_0 * v2;
365       if (ww > PETSC_REAL_MIN) {
366         moments[3] -= ww * PetscLogReal(ww);
367         PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
368       }
369     }
370     PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
371     PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
372   }
373   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, printCtx->globXArray));
374   PetscCall(PetscInfo(X, "%4d) time %e, Landau moments: %18.12e %19.12e %18.12e %e\n", (int)n, (double)t, (double)moments[0], (double)moments[1], (double)moments[2], (double)moments[3]));
375   PetscFunctionReturn(PETSC_SUCCESS);
376 }
377 
378 PetscErrorCode go(TS ts, Vec X, const PetscInt num_vertices, const PetscInt a_Np, const PetscInt dim, const PetscInt v_target, const PetscInt g_target, PetscReal shift)
379 {
380   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
381   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
382   KSP            t_ksp[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
383   Vec            t_fhat[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
384   PetscInt       nDMs, nTargetP[LANDAU_MAX_GRIDS];
385   PetscErrorCode ierr = (PetscErrorCode)0; // used for inside thread loops
386 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
387   PetscInt numthreads = PetscNumOMPThreads;
388 #else
389   PetscInt numthreads = 1;
390 #endif
391   LandauCtx *ctx;
392   Vec       *globXArray;
393   PetscReal  moments_0[4], moments_1a[4], moments_1b[4], dt_init;
394   PrintCtx  *printCtx;
395 
396   PetscFunctionBeginUser;
397   PetscCheck(numthreads <= EX30_MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, EX30_MAX_NUM_THRDS);
398   PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number threads %" PetscInt_FMT " > %d", numthreads, EX30_MAX_NUM_THRDS);
399   PetscCall(TSGetDM(ts, &pack));
400   PetscCall(DMGetApplicationContext(pack, &ctx));
401   PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT "  mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
402   PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); // number of vertices * number of grids
403   PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices)\n", ctx->num_grids, ctx->batch_sz, num_vertices));
404   PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
405   PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
406   PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
407   // print ctx
408   PetscCall(PetscNew(&printCtx));
409   PetscCall(TSSetApplicationContext(ts, printCtx));
410   printCtx->v_target       = v_target;
411   printCtx->ctx            = ctx;
412   printCtx->nTargetP       = nTargetP;
413   printCtx->globSwarmArray = globSwarmArray;
414   printCtx->grid_dm        = grid_dm;
415   printCtx->globMpArray    = globMpArray;
416   printCtx->g_Mass         = g_Mass;
417   printCtx->globXArray     = globXArray;
418   // view
419   PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
420   // create mesh mass matrices
421   PetscCall(VecZeroEntries(X));
422   PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
423   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // add same particels for all grids
424     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
425     DM           dm   = ctx->plex[grid];
426     PetscSection s;
427     grid_dm[grid] = dm;
428     PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
429     //
430     PetscCall(DMGetLocalSection(dm, &s));
431     PetscCall(DMPlexCreateClosureIndex(dm, s));
432     for (int tid = 0; tid < numthreads; tid++) {
433       PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
434       PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
435       PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
436       PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
437       PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
438     }
439   }
440   PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
441   for (int i = 0; i < 4; i++) moments_0[i] = moments_1a[i] = moments_1b[i] = 0;
442   PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
443   // loop over all vertices in chucks that are batched for TSSolve
444   for (PetscInt global_vertex_id_0 = 0; global_vertex_id_0 < num_vertices; global_vertex_id_0 += ctx->batch_sz, shift /= 2) { // outer vertex loop
445     PetscCall(TSSetTime(ts, 0));
446     PetscCall(TSSetStepNumber(ts, 0));
447     PetscCall(TSSetTimeStep(ts, dt_init));
448     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
449     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
450       PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], "rho"));
451       printCtx->print = PETSC_TRUE;
452     } else printCtx->print = PETSC_FALSE;
453     // create fake particles in batches with threads
454     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
455       PetscReal *xx_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
456       PetscInt   Np_t[LANDAU_MAX_GRIDS][EX30_MAX_NUM_THRDS];
457       // make particles
458       for (int tid = 0; tid < numthreads; tid++) {
459         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
460         if (glb_v_id < num_vertices) {                                                                                                                                            // the ragged edge (in last batch)
461           PetscInt Npp0 = a_Np + (glb_v_id % (a_Np / 10 + 1)), NN;                                                                                                                // number of particels in each dimension with add some load imbalance
462           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {                                                                                                                // add same particels for all grids
463             const PetscReal kT_m  = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0);                      /* theta = 2kT/mc^2 per species */
464             PetscReal       lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
465             PetscInt        Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
466             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
467             else Npi = Npj = Npk = Npp0;
468             // User: use glb_v_id to index into your data
469             NN              = Npi * Npj * Npk; // make a regular grid of particles Npp x Npp
470             Np_t[grid][tid] = NN;
471             if (glb_v_id == v_target) nTargetP[grid] = NN;
472             PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
473             hp[0] = (hi[0] - lo[0]) / Npi;
474             hp[1] = (hi[1] - lo[1]) / Npj;
475             hp[2] = (hi[2] - lo[2]) / Npk;
476             if (dim == 2) hp[2] = 1;
477             PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
478             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
479             PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_v_id, grid, NN, v_target));
480             for (int pj = 0, pp = 0; pj < Npj; pj++) {
481               for (int pk = 0; pk < Npk; pk++) {
482                 for (int pi = 0; pi < Npi; pi++, pp++) {
483                   xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
484                   yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
485                   if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
486                   {
487                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
488                     maxwellian(dim, x, kT_m, vole, shift, &wp_t[grid][tid][pp]);
489                     // PetscCall(PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
490                     if (glb_v_id == v_target) {
491                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1, w = fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
492                       for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
493                       moments_0[0] += w;                   // not thread safe
494                       moments_0[1] += w * ctx->v_0 * x[1]; // z-momentum
495                       moments_0[2] += w * ctx->v_0 * ctx->v_0 * v2;
496                     }
497                   }
498                 }
499               }
500             }
501           }
502           // entropy
503           if (glb_v_id == v_target) {
504             printCtx->N_inv = 1 / moments_0[0];
505             PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particels\n", glb_v_id, nTargetP[0]));
506             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
507               NN = nTargetP[grid];
508               for (int pp = 0; pp < NN; pp++) {
509                 const PetscReal fact = dim == 2 ? 2.0 * PETSC_PI * xx_t[grid][tid][pp] : 1, w = fact * ctx->n_0 * ctx->masses[ctx->species_offset[grid]] * wp_t[grid][tid][pp], ww = w * printCtx->N_inv;
510                 if (ww > PETSC_REAL_MIN) {
511                   moments_0[3] -= ww * PetscLogReal(ww);
512                   PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
513                 }
514               }
515             } // diagnostics
516           }   // grid
517         }     // active
518       }       // threads
519       /* Create particle swarm */
520       for (int tid = 0; tid < numthreads; tid++) {
521         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
522         if (glb_v_id < num_vertices) {                             // the ragged edge of the last batch
523           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
524             PetscErrorCode ierr_t;
525             PetscSection   section;
526             PetscInt       Nf;
527             DM             dm = grid_dm[grid];
528             ierr_t            = DMGetLocalSection(dm, &section);
529             ierr_t            = PetscSectionGetNumFields(section, &Nf);
530             if (Nf != 1) ierr_t = (PetscErrorCode)9999;
531             else {
532               ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
533               ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", v_id, grid, LAND_PACK_IDX(v_id, grid));
534               ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(v_id, grid)]);
535             }
536             if (ierr_t) ierr = ierr_t;
537           }
538         } // active
539       }   // threads
540       PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
541       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
542       // make globMpArray
543       PetscPragmaOMP(parallel for)
544       for (int tid = 0; tid < numthreads; tid++) {
545         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
546         if (glb_v_id < num_vertices) {
547           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
548             PetscErrorCode ierr_t;
549             DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
550             ierr_t            = PetscInfo(pack, "makeSwarm %" PetscInt_FMT ".%" PetscInt_FMT ") for batch %" PetscInt_FMT "\n", global_vertex_id_0, grid, LAND_PACK_IDX(v_id, grid));
551             ierr_t            = makeSwarm(sw, dim, Np_t[grid][tid], xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid]);
552             if (ierr_t) ierr = ierr_t;
553           }
554         }
555       }
556       //PetscPragmaOMP(parallel for)
557       for (int tid = 0; tid < numthreads; tid++) {
558         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
559         if (glb_v_id < num_vertices) {
560           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
561             PetscErrorCode ierr_t;
562             DM             dm = grid_dm[grid];
563             DM             sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
564             ierr_t            = PetscInfo(pack, "createMp %" PetscInt_FMT ".%" PetscInt_FMT ") for batch %" PetscInt_FMT "\n", global_vertex_id_0, grid, LAND_PACK_IDX(v_id, grid));
565             ierr_t            = createMp(dm, sw, &globMpArray[LAND_PACK_IDX(v_id, grid)]);
566             if (ierr_t) ierr = ierr_t;
567           }
568         }
569       }
570       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
571       // p --> g: set X
572       // PetscPragmaOMP(parallel for)
573       for (int tid = 0; tid < numthreads; tid++) {
574         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
575         if (glb_v_id < num_vertices) {
576           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
577             PetscErrorCode ierr_t;
578             DM             dm   = grid_dm[grid];
579             DM             sw   = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
580             Vec            subX = globXArray[LAND_PACK_IDX(v_id, grid)], work = t_fhat[grid][tid];
581             ierr_t = PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") for local batch %" PetscInt_FMT "\n", global_vertex_id_0, grid, LAND_PACK_IDX(v_id, grid));
582             ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, wp_t[grid][tid], subX, globMpArray[LAND_PACK_IDX(v_id, grid)]);
583             if (ierr_t) ierr = ierr_t;
584             // u = M^_1 f_w
585             ierr_t = VecCopy(subX, work);
586             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
587             if (ierr_t) ierr = ierr_t;
588           }
589         }
590       }
591       PetscCheck(!ierr, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Error in OMP loop. ierr = %d", (int)ierr);
592       /* Cleanup */
593       for (int tid = 0; tid < numthreads; tid++) {
594         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
595         if (glb_v_id < num_vertices) {
596           PetscCall(PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_v_id + 1, num_vertices));
597           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
598             PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
599           }
600         } // active
601       }   // threads
602     }     // (fake) particle loop
603     // standard view of initial conditions
604     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
605       PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target], 0, 0.0));
606       PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
607     }
608     // coarse graining moments
609     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
610       const PetscInt v_id = v_target % ctx->batch_sz;
611       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
612         PetscDataType dtype;
613         PetscReal    *wp, *coords;
614         DM            sw = globSwarmArray[LAND_PACK_IDX(v_id, grid)];
615         Vec           work, subX = globXArray[LAND_PACK_IDX(v_id, grid)];
616         PetscInt      bs, NN     = nTargetP[grid];
617         // C-G moments
618         PetscCall(VecDuplicate(subX, &work));
619         PetscCall(gridToParticles(grid_dm[grid], sw, subX, work, globMpArray[LAND_PACK_IDX(v_id, grid)], g_Mass[grid]));
620         PetscCall(VecDestroy(&work));
621         // moments
622         PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
623         PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // could get NN from sw - todo
624         for (int pp = 0; pp < NN; pp++) {
625           PetscReal v2 = 0, fact = (dim == 2) ? 2.0 * PETSC_PI * coords[pp * dim + 0] : 1, w = fact * wp[pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]], ww = w * printCtx->N_inv;
626           for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[pp * dim + i]);
627           moments_1a[0] += w;
628           moments_1a[1] += w * ctx->v_0 * coords[pp * dim + 1]; // z-momentum
629           moments_1a[2] += w * ctx->v_0 * ctx->v_0 * v2;
630           if (ww > PETSC_REAL_MIN) {
631             moments_1a[3] -= ww * PetscLogReal(ww);
632             PetscCheck(ww < 1 - PETSC_MACHINE_EPSILON, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "ww (%g) > 1", (double)ww);
633           }
634         }
635         PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
636         PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp));
637       }
638     }
639     // restore vector
640     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
641     // view
642     PetscCall(DMPlexLandauPrintNorms(X, 0));
643     // advance
644     PetscCall(TSSetSolution(ts, X));
645     PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_vertex_id_0, global_vertex_id_0 + ctx->batch_sz));
646     PetscCall(TSSetPostStep(ts, PostStep));
647     PetscCall(PostStep(ts));
648     PetscCall(TSSolve(ts, X));
649     // view
650     PetscCall(DMPlexLandauPrintNorms(X, 1));
651     PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
652     if (v_target >= global_vertex_id_0 && v_target < global_vertex_id_0 + ctx->batch_sz) {
653       PetscCall(DMSetOutputSequenceNumber(ctx->plex[g_target], 1, dt_init));
654       PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(v_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
655     }
656     // particles to grid, compute moments and entropy
657     PetscCall(gridToParticles_private(grid_dm, globSwarmArray, dim, v_target, numthreads, num_vertices, global_vertex_id_0, globMpArray, g_Mass, t_fhat, moments_1b, globXArray, ctx));
658     // restore vector
659     PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
660     // cleanup
661     for (PetscInt v_id_0 = 0; v_id_0 < ctx->batch_sz; v_id_0 += numthreads) {
662       for (int tid = 0; tid < numthreads; tid++) {
663         const PetscInt v_id = v_id_0 + tid, glb_v_id = global_vertex_id_0 + v_id;
664         if (glb_v_id < num_vertices) {
665           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
666             PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(v_id, grid)]));
667             PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(v_id, grid)]));
668           }
669         }
670       }
671     }
672   } // user batch
673   /* Cleanup */
674   PetscCall(PetscFree(globXArray));
675   PetscCall(PetscFree(globSwarmArray));
676   PetscCall(PetscFree(globMpArray));
677   PetscCall(PetscFree(printCtx));
678   // clean up mass matrices
679   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids
680     PetscCall(MatDestroy(&g_Mass[grid]));
681     for (int tid = 0; tid < numthreads; tid++) {
682       PetscCall(VecDestroy(&t_fhat[grid][tid]));
683       PetscCall(KSPDestroy(&t_ksp[grid][tid]));
684     }
685   }
686   PetscCall(PetscInfo(X, "Moments:\t         number density      x-momentum          energy             entropy : # OMP threads %g\n", (double)numthreads));
687   PetscCall(PetscInfo(X, "\tInitial:         %18.12e %19.12e %18.12e %e\n", (double)moments_0[0], (double)moments_0[1], (double)moments_0[2], (double)moments_0[3]));
688   PetscCall(PetscInfo(X, "\tCoarse-graining: %18.12e %19.12e %18.12e %e\n", (double)moments_1a[0], (double)moments_1a[1], (double)moments_1a[2], (double)moments_1a[3]));
689   PetscCall(PetscInfo(X, "\tLandau:          %18.12e %19.12e %18.12e %e\n", (double)moments_1b[0], (double)moments_1b[1], (double)moments_1b[2], (double)moments_1b[3]));
690   PetscCall(PetscInfo(X, "Coarse-graining entropy generation = %e ; Landau entropy generation = %e\n", (double)(moments_1a[3] - moments_0[3]), (double)(moments_1b[3] - moments_0[3])));
691   PetscCall(PetscInfo(X, "(relative) energy conservation: Coarse-graining = %e ; Landau = %e\n", (double)(moments_1a[2] - moments_0[2]) / (double)moments_0[2], (double)(moments_1b[2] - moments_0[2]) / (double)moments_0[2]));
692 
693   PetscFunctionReturn(PETSC_SUCCESS);
694 }
695 
696 int main(int argc, char **argv)
697 {
698   DM         pack;
699   Vec        X;
700   PetscInt   dim = 2, num_vertices = 1, Np = 10, v_target = 0, gtarget = 0;
701   TS         ts;
702   Mat        J;
703   LandauCtx *ctx;
704   PetscReal  shift = 0;
705 
706   PetscFunctionBeginUser;
707   PetscCall(PetscInitialize(&argc, &argv, NULL, help));
708   // process args
709   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
710   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", num_vertices, &num_vertices, NULL));
711   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
712   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
713   PetscCall(PetscOptionsInt("-vertex_view_target", "Vertex to view with diagnostics", "ex30.c", v_target, &v_target, NULL));
714   PetscCall(PetscOptionsReal("-e_shift", "Bim-Maxwellian shift", "ex30.c", shift, &shift, NULL));
715   PetscCheck(v_target < num_vertices, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, v_target, num_vertices);
716   PetscCall(PetscOptionsInt("-grid_view_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL));
717   PetscOptionsEnd();
718   /* Create a mesh */
719   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
720   PetscCall(DMSetUp(pack));
721   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
722   PetscCall(DMGetApplicationContext(pack, &ctx));
723   PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, gtarget, ctx->num_grids);
724   PetscCheck(num_vertices >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " should be <= batch size %" PetscInt_FMT, num_vertices, ctx->batch_sz);
725   /* Create timestepping solver context */
726   PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
727   PetscCall(TSSetDM(ts, pack));
728   PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
729   PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
730   PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
731   PetscCall(TSSetFromOptions(ts));
732   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
733   // do particle advance
734   PetscCall(go(ts, X, num_vertices, Np, dim, v_target, gtarget, shift));
735   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
736   /* clean up */
737   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
738   PetscCall(TSDestroy(&ts));
739   PetscCall(VecDestroy(&X));
740   PetscCall(PetscFinalize());
741   return 0;
742 }
743 
744 /*TEST
745 
746   build:
747     requires: !complex
748 
749   testset:
750     requires: double defined(PETSC_USE_DMLANDAU_2D)
751     output_file: output/ex30_0.out
752     args: -dim 2 -petscspace_degree 3 -dm_landau_num_species_grid 1,1,1 -dm_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
753           -dm_landau_batch_size 4 -number_spatial_vertices 5 -dm_landau_batch_view_idx 1 -vertex_view_target 2 -grid_view_target 1 \
754           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
755           -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
756           -ksp_type preonly -pc_type lu -dm_landau_verbose 4 \
757           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12\
758           -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
759           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler
760 
761     test:
762       suffix: cpu
763       args: -dm_landau_device_type cpu
764     test:
765       suffix: kokkos
766       requires: kokkos_kernels !openmp
767       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi
768 
769   testset:
770     requires: double !defined(PETSC_USE_DMLANDAU_2D)
771     output_file: output/ex30_3d.out
772     args: -dim 3 -petscspace_degree 2 -dm_landau_num_species_grid 1,1,1 -dm_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
773           -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -vertex_view_target 0 -grid_view_target 0 \
774           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
775           -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
776           -ksp_type preonly -pc_type lu \
777           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_rtol 1e-12\
778           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12\
779           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler
780 
781     test:
782       suffix: cpu_3d
783       args: -dm_landau_device_type cpu
784     test:
785       suffix: kokkos_3d
786       requires: kokkos_kernels !openmp
787       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -pc_bjkokkos_pc_type jacobi
788 
789   testset:
790     requires: !complex double defined(PETSC_USE_DMLANDAU_2D) !cuda
791     args: -dm_landau_domain_radius 6 -dm_refine 2 -dm_landau_num_species_grid 1 -dm_landau_thermal_temps 1 -petscspace_degree 3 -snes_converged_reason -ts_type beuler -ts_dt 1 -ts_max_steps 1 -ksp_type preonly -pc_type lu -snes_rtol 1e-12 -snes_stol 1e-12 -dm_landau_device_type cpu -number_particles_per_dimension 30 -e_shift 3 -ftop_ksp_rtol 1e-12 -ptof_ksp_rtol 1e-12 -dm_landau_batch_size 4 -number_spatial_vertices 4 -grid_view_target 0 -vertex_view_target 1
792     test:
793       suffix: simple
794       args: -ex30_dm_view
795     test:
796       requires: hdf5
797       suffix: simple_hdf5
798       args: -ex30_dm_view hdf5:sol_e.h5 -ex30_vec_view hdf5:sol_e.h5::append
799 
800 TEST*/
801