xref: /petsc/src/ts/tests/ex30.c (revision 8fb5bd83c3955fefcf33a54e3bb66920a9fa884b)
1 static char help[] = "Grid based Landau collision operator with PIC interface with OpenMP setup. (one species per grid)\n";
2 
3 /*
4    Support 2.5V with axisymmetric coordinates
5      - r,z coordinates
6      - Domain and species data input by Landau operator
7      - "radius" for each grid, normalized with electron thermal velocity
8      - Domain: (0,radius) x (-radius,radius), thus first coordinate x[0] is perpendicular velocity and 2pi*x[0] term is added for axisymmetric
9    Supports full 3V
10 
11  */
12 
13 #include "petscdmplex.h"
14 #include "petscds.h"
15 #include "petscdmswarm.h"
16 #include "petscksp.h"
17 #include <petsc/private/petscimpl.h>
18 #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
19 #include <omp.h>
20 #endif
21 #include <petsclandau.h>
22 #include <petscdmcomposite.h>
23 
/* Context for the MATSHELL normal-equations operator MtM = Mp^T * Mp used in gridToParticles() */
typedef struct {
  Mat MpTrans; /* explicit transpose of Mp (owned; destroyed after the solve) */
  Mat Mp;      /* particle-to-grid mass/interpolation matrix (borrowed, not owned) */
  Vec ff;      /* work vector in particle space (range of Mp) */
  Vec uu;      /* work vector in grid space (domain of Mp) */
} MatShellCtx;
30 
31 PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM,Vec xx,Vec yy)
32 {
33   MatShellCtx    *matshellctx;
34 
35   PetscFunctionBeginUser;
36   PetscCall(MatShellGetContext(MtM,&matshellctx));
37   PetscCheck(matshellctx,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
38   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
39   PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
40   PetscFunctionReturn(0);
41 }
42 
43 PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM,Vec xx, Vec yy, Vec zz)
44 {
45   MatShellCtx    *matshellctx;
46 
47   PetscFunctionBeginUser;
48   PetscCall(MatShellGetContext(MtM,&matshellctx));
49   PetscCheck(matshellctx,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
50   PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
51   PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
52   PetscFunctionReturn(0);
53 }
54 
55 PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
56 {
57   PetscInt       Nc = 1;
58 
59   PetscFunctionBeginUser;
60   PetscCall(DMCreate(PETSC_COMM_SELF, sw));
61   PetscCall(DMSetType(*sw, DMSWARM));
62   PetscCall(DMSetDimension(*sw, dim));
63   PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
64   PetscCall(DMSwarmSetCellDM(*sw, dm));
65   PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
66   PetscCall(DMSwarmFinalizeFieldRegister(*sw));
67   PetscCall(DMSetFromOptions(*sw));
68   PetscFunctionReturn(0);
69 }
70 
71 PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass)
72 {
73   PetscBool      is_lsqr;
74   KSP            ksp;
75   Mat            PM_p=NULL,MtM,D;
76   Vec            ff;
77   PetscInt       N, M, nzl;
78   MatShellCtx    *matshellctx;
79 
80   PetscFunctionBeginUser;
81   PetscCall(MatMult(Mass, rhs, work));
82   PetscCall(VecCopy(work, rhs));
83   // pseudo-inverse
84   PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
85   PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
86   PetscCall(KSPSetFromOptions(ksp));
87   PetscCall(PetscObjectTypeCompare((PetscObject)ksp,KSPLSQR,&is_lsqr));
88   if (!is_lsqr) {
89     PetscCall(MatGetLocalSize(M_p, &M, &N));
90     if (N>M) {
91       PC        pc;
92       PetscCall(PetscInfo(ksp, " M (%" PetscInt_FMT ") < M (%" PetscInt_FMT ") -- skip revert to lsqr\n",M,N));
93       is_lsqr = PETSC_TRUE;
94       PetscCall(KSPSetType(ksp,KSPLSQR));
95       PetscCall(KSPGetPC(ksp,&pc));
96       PetscCall(PCSetType(pc,PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
97     } else {
98       PetscCall(PetscNew(&matshellctx));
99       PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm),N,N,PETSC_DECIDE,PETSC_DECIDE,matshellctx,&MtM));
100       PetscCall(MatTranspose(M_p,MAT_INITIAL_MATRIX,&matshellctx->MpTrans));
101       matshellctx->Mp = M_p;
102       PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
103       PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
104       PetscCall(MatCreateVecs(M_p,&matshellctx->uu,&matshellctx->ff));
105       PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,1,NULL,&D));
106       PetscCall(MatViewFromOptions(matshellctx->MpTrans,NULL,"-ftop2_Mp_mat_view"));
107       for (int i=0 ; i<N ; i++) {
108         const PetscScalar *vals;
109         const PetscInt    *cols;
110         PetscScalar dot = 0;
111         PetscCall(MatGetRow(matshellctx->MpTrans,i,&nzl,&cols,&vals));
112         for (int ii=0 ; ii<nzl ; ii++) dot += PetscSqr(vals[ii]);
113         PetscCheck(dot!=0.0,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
114         PetscCall(MatSetValue(D,i,i,dot,INSERT_VALUES));
115       }
116       PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
117       PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
118       PetscCall(PetscInfo(M_p,"createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n",N,nzl));
119       PetscCall(KSPSetOperators(ksp, MtM, D));
120       PetscCall(MatViewFromOptions(D,NULL,"-ftop2_D_mat_view"));
121       PetscCall(MatViewFromOptions(M_p,NULL,"-ftop2_Mp_mat_view"));
122       PetscCall(MatViewFromOptions(matshellctx->MpTrans,NULL,"-ftop2_MpTranspose_mat_view"));
123     }
124   }
125   if (is_lsqr) {
126     PC        pc;
127     PetscBool is_bjac;
128     PetscCall(KSPGetPC(ksp,&pc));
129     PetscCall(PetscObjectTypeCompare((PetscObject)pc,PCBJACOBI,&is_bjac));
130     if (is_bjac) {
131       PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
132       PetscCall(KSPSetOperators(ksp, M_p, PM_p));
133     } else {
134       PetscCall(KSPSetOperators(ksp, M_p, M_p));
135     }
136   }
137   PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
138   if (!is_lsqr) {
139     PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
140     PetscCall(MatMult(M_p, matshellctx->uu, ff));
141     PetscCall(MatDestroy(&matshellctx->MpTrans));
142     PetscCall(VecDestroy(&matshellctx->ff));
143     PetscCall(VecDestroy(&matshellctx->uu));
144     PetscCall(MatDestroy(&D));
145     PetscCall(MatDestroy(&MtM));
146     PetscCall(PetscFree(matshellctx));
147   } else {
148     PetscCall(KSPSolveTranspose(ksp, rhs, ff));
149   }
150   PetscCall(KSPDestroy(&ksp));
151   /* Visualize particle field */
152   PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
153   PetscCall(MatDestroy(&PM_p));
154   PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));
155 
156   PetscFunctionReturn(0);
157 }
158 
/*
  particlesToGrid - load particle positions/weights into the swarm and deposit
  them onto the grid (p2g): rho = M_p^T w.

  Input:
    dm    - the velocity-space grid DM
    sw    - the particle swarm (resized here to Np local particles)
    Np    - number of particles for this swarm
    a_tid - thread id (currently unused in this routine)
    dim   - spatial dimension (2 or 3)
    xx,yy,zz - particle coordinates (zz read only when dim==3)
    a_wp  - particle weights
  Output:
    rho    - grid vector receiving the deposited weights (weak-form rhs)
    Mp_out - the particle mass matrix M_p (caller destroys)
*/
PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim,
                               const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
{

  PetscBool      removePoints = PETSC_TRUE; /* drop particles that land outside the domain during migration */
  PetscReal      *wq, *coords;
  PetscDataType  dtype;
  Mat            M_p;
  Vec            ff;
  PetscInt       bs,p,zero=0;

  PetscFunctionBeginUser;
  PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
  /* copy user arrays into the swarm's weight and coordinate fields */
  PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void**)&wq));
  PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void**)&coords));
  for (p=0;p<Np;p++) {
    coords[p*dim+0]  = xx[p];
    coords[p*dim+1]  = yy[p];
    wq[p]          = a_wp[p];
    if (dim==3) coords[p*dim+2]  = zz[p];
  }
  PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void**)&coords));
  PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void**)&wq));
  /* bin particles into cells (serial swarm: locates points in dm) */
  PetscCall(DMSwarmMigrate(sw, removePoints));
  PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));

  /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
  PetscCall(DMCreateMassMatrix(sw, dm, &M_p));

  /* rho = M_p^T w : deposit particle weights onto the grid */
  PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
  PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
  PetscCall(MatMultTranspose(M_p, ff, rho));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  // output: ownership of M_p transfers to the caller
  *Mp_out = M_p;

  PetscFunctionReturn(0);
}
199 static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
200 {
201   PetscInt      i;
202   PetscReal     v2 = 0, theta = 2.0*kt_m; /* theta = 2kT/mc^2 */
203 
204   /* compute the exponents, v^2 */
205   for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
206   /* evaluate the Maxwellian */
207   u[0] = n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
208 }
209 
210 #define MAX_NUM_THRDS 12
/*
  go - driver for the batched particle-in-cell Landau collision test.

  For each batch of spatial vertices: creates synthetic Maxwellian particles,
  deposits them onto the velocity grids (p2g), advances the Landau collision
  operator with TS, then maps the solution back to particles (g2p).
  Conservation moments (density, z-momentum, energy) are accumulated before
  and after the advance for the single target pair (b_target, g_target).

  Input:
    ts       - time stepper configured with the Landau packed (composite) DM
    X        - packed solution vector from DMPlexLandauCreateVelocitySpace()
    NUserV   - total number of user spatial vertices to process
    a_Np     - base number of particles per dimension (perturbed per vertex)
    dim      - velocity-space dimension (2 or 3)
    b_target - vertex (batch) index used for diagnostics
    g_target - grid index used for diagnostics
*/
PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target)
{
  DM              pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
  Mat             *globMpArray, g_Mass[LANDAU_MAX_GRIDS]; /* per-batch particle mass matrices; per-grid FE mass matrices */
  KSP             t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS]; /* per-grid, per-thread mass solvers ("ptof_" prefix) */
  Vec             t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS]; /* per-grid, per-thread work vectors */
  PetscInt        nDMs, glb_b_id, nTargetP=0;
  PetscErrorCode  ierr = 0; /* collected manually inside OpenMP regions where PetscCall cannot be used */
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  PetscInt        numthreads = PetscNumOMPThreads;
#else
  PetscInt        numthreads = 1;
#endif
  LandauCtx      *ctx;
  Vec            *globXArray;
  PetscReal       moments_0[3], moments_1[3], dt_init; /* (mass, z-momentum, energy) before/after the advance */

  PetscFunctionBeginUser;
  PetscCheck(numthreads<=MAX_NUM_THRDS,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
  /* NOTE(review): the message below reads "> %d" but the check enforces numthreads > 0 — confirm intended wording */
  PetscCheck(numthreads>0,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number threads %" PetscInt_FMT " > %d", numthreads,  MAX_NUM_THRDS);
  PetscCall(TSGetDM(ts,&pack));
  PetscCall(DMGetApplicationContext(pack, &ctx));
  PetscCheck(ctx->batch_sz%numthreads==0,PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT "  mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
  PetscCall(DMCompositeGetNumberDM(pack,&nDMs)); /* nDMs = batch_sz * num_grids */
  PetscCall(PetscInfo(pack,"Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices)\n",ctx->num_grids,ctx->batch_sz,NUserV));
  PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray));
  PetscCall(PetscMalloc(sizeof(*globMpArray)*nDMs, &globMpArray));
  PetscCall(PetscMalloc(sizeof(*globSwarmArray)*nDMs, &globSwarmArray));
  PetscCall(DMViewFromOptions(ctx->plex[g_target],NULL,"-ex30_dm_view"));
  // create mass matrices and per-thread solvers, one set per grid
  PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
  for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // same layout for all batches, so use batch 0
    Vec  subX = globXArray[LAND_PACK_IDX(0,grid)];
    DM   dm = ctx->plex[grid];
    PetscSection s;
    grid_dm[grid] = dm;
    PetscCall(DMCreateMassMatrix(dm,dm, &g_Mass[grid]));
    // closure index speeds up repeated element assembly/evaluation
    PetscCall(DMGetLocalSection(dm, &s));
    PetscCall(DMPlexCreateClosureIndex(dm, s));
    for (int tid=0; tid<numthreads; tid++) {
      PetscCall(VecDuplicate(subX,&t_fhat[grid][tid]));
      PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
      PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
      PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
      PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
    }
  }
  PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
  // create particle raw data. could use OMP with a thread safe malloc, but this is just the fake user
  for (int i=0;i<3;i++) moments_0[i] = moments_1[i] = 0;
  PetscCall(TSGetTimeStep(ts,&dt_init)); // we could have an adaptive time stepper
  /* outer loop: process the user's vertices in chunks of the Landau batch size */
  for (PetscInt global_batch_id=0 ; global_batch_id < NUserV ; global_batch_id += ctx->batch_sz) {
    /* reset TS state so each batch starts from t=0 with the initial dt */
    ierr = TSSetTime(ts, 0);CHKERRQ(ierr);
    ierr = TSSetStepNumber(ts, 0);CHKERRQ(ierr);
    ierr = TSSetTimeStep(ts, dt_init);CHKERRQ(ierr);
    PetscCall(VecZeroEntries(X));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    if (b_target >= global_batch_id && b_target < global_batch_id+ctx->batch_sz) {
      PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target%ctx->batch_sz,g_target)], "rho"));
    }
    // create fake particles: numthreads batches at a time
    for (PetscInt b_id_0 = 0 ; b_id_0 < ctx->batch_sz ; b_id_0 += numthreads) {
      PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      PetscInt  Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      // make particles (serial: fakes a user supplying particle data)
      for (int tid=0; tid<numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
          PetscInt Npp0 = a_Np + (glb_b_id%a_Np), NN; // fake user: number of particles in each dimension, with some load imbalance and diff (<2x)
          for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // add same particles for all grids
            const PetscReal kT_m = ctx->k*ctx->thermal_temps[ctx->species_offset[grid]]/ctx->masses[ctx->species_offset[grid]]/(ctx->v_0*ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */;
            PetscReal       lo[3] = {-ctx->radius[grid],-ctx->radius[grid],-ctx->radius[grid]}, hi[3] = {ctx->radius[grid],ctx->radius[grid],ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
            PetscInt  Npi=Npp0,Npj=2*Npp0,Npk=1;
            if (dim==2) lo[0] = 0; // Landau coordinate (r,z): r >= 0
            else Npi = Npj = Npk = Npp0;
            // User: use glb_b_id to index into your data
            NN = Npi*Npj*Npk; // make a regular grid of particles Npp x Npp
            if (glb_b_id==b_target) {
              nTargetP = NN; /* remember particle count of the diagnostics target */
              PetscCall(PetscInfo(pack,"Target %" PetscInt_FMT " with %" PetscInt_FMT " particels\n",glb_b_id,NN));
            }
            Np_t[grid][tid] = NN;
            /* zz is allocated length 1 in 2D (never read) to keep PetscMalloc4 happy */
            PetscCall(PetscMalloc4(NN,&xx_t[grid][tid],NN,&yy_t[grid][tid],NN,&wp_t[grid][tid], dim==2 ? 1 : NN, &zz_t[grid][tid]));
            hp[0] = (hi[0] - lo[0])/Npi;
            hp[1] = (hi[1] - lo[1])/Npj;
            hp[2] = (hi[2] - lo[2])/Npk;
            if (dim==2) hp[2] = 1; // so vole below is an area in 2D
            PetscCall(PetscInfo(pack," lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n",(double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
            vole = hp[0]*hp[1]*hp[2]*ctx->n[grid]; // fix for multi-species
            PetscCall(PetscInfo(pack,"Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n",glb_b_id,grid,NN,b_target));
            /* lay particles on a regular Cartesian lattice, cell-centered */
            for (int pj=0, pp=0 ; pj < Npj ; pj++) {
              for (int pk=0 ; pk < Npk ; pk++) {
                for (int pi=0 ; pi < Npi ; pi++, pp++) {
                  xx_t[grid][tid][pp] = lo[0] + hp[0]/2.0 + pi*hp[0];
                  yy_t[grid][tid][pp] = lo[1] + hp[1]/2.0 + pj*hp[1];
                  if (dim==3) zz_t[grid][tid][pp] = lo[2] + hp[2]/2.0 + pk*hp[2];
                  {
                    PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim==2 ? 0 : zz_t[grid][tid][pp]};
                    maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
                    //PetscCall(PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
                    if (glb_b_id==b_target) {
                      /* accumulate "before" moments; fact is the axisymmetric 2*pi*r weight in 2D */
                      PetscReal v2=0, fact = dim==2 ? 2.0*PETSC_PI*x[0] : 1;
                      for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
                      moments_0[0] += fact*wp_t[grid][tid][pp]*ctx->n_0                  *ctx->masses[ctx->species_offset[grid]];
                      moments_0[1] += fact*wp_t[grid][tid][pp]*ctx->n_0*ctx->v_0         *ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
                      moments_0[2] += fact*wp_t[grid][tid][pp]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ctx->species_offset[grid]] * v2;
                    }
                  }
                }
              }
            }
          } // grid
        } // active
      } // fake threads
      /* Create particle swarm */
      /* NOTE(review): glb_b_id is a function-scope variable written by every OMP thread below — looks like a
         benign data race (each thread compares its own assignment immediately), but confirm intent */
      PetscPragmaOMP(parallel for)
        for (int tid=0; tid<numthreads; tid++) {
          const PetscInt b_id = b_id_0 + tid;
          if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
            //PetscCall(PetscInfo(pack,"Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n",glb_b_id));
            for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // add same particles for all grids
              PetscErrorCode  ierr_t;
              PetscSection    section;
              PetscInt        Nf;
              DM              dm = grid_dm[grid];
              ierr_t = DMGetLocalSection(dm, &section);
              ierr_t = PetscSectionGetNumFields(section, &Nf);
              if (Nf != 1) ierr_t = 9999; /* sentinel: only one species per grid is supported */
              else {
                ierr_t = DMViewFromOptions(dm,NULL,"-dm_view");
                ierr_t = PetscInfo(pack,"call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n",b_id,grid, LAND_PACK_IDX(b_id,grid));
                ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id,grid)]);
              }
              if (ierr_t) ierr = ierr_t; /* collect errors; cannot PetscCall inside OMP */
            }
          } // active
        }
      PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
      PetscCall(ierr); /* re-raise any error collected in the parallel region */
      // p --> g: make globMpArray & set X
      PetscPragmaOMP(parallel for)
        for (int tid=0; tid<numthreads; tid++) {
          const PetscInt b_id = b_id_0 + tid;
          if ((glb_b_id = global_batch_id + b_id) < NUserV) {
            for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // add same particles for all grids
              PetscErrorCode ierr_t;
              DM             dm = grid_dm[grid];
              DM             sw = globSwarmArray[LAND_PACK_IDX(b_id,grid)];
              Vec            subX = globXArray[LAND_PACK_IDX(b_id,grid)], work = t_fhat[grid][tid];
              PetscInfo(pack,"particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") particlesToGrid for local batch %" PetscInt_FMT "\n",global_batch_id,grid,LAND_PACK_IDX(b_id,grid));
              ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id,grid)]);
              if (ierr_t) ierr = ierr_t;
              // u = M^_1 f_w : convert the deposited weak-form rhs into FE coefficients
              ierr_t = VecCopy(subX, work);
              ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
              if (ierr_t) ierr = ierr_t;
            }
          }
        }
      PetscCall(ierr);
      /* Cleanup raw particle coordinate/weight buffers */
      for (int tid=0; tid<numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          PetscCall(PetscInfo(pack,"Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n",glb_b_id+1,NUserV));
          for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // add same particles for all grids
            PetscCall(PetscFree4(xx_t[grid][tid],yy_t[grid][tid],wp_t[grid][tid],zz_t[grid][tid]));
          }
        } // active
      }
    } // Landau
    if (b_target >= global_batch_id && b_target < global_batch_id+ctx->batch_sz) {
      PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target%ctx->batch_sz,g_target)],NULL,"-ex30_vec_view"));
    }
    PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
    PetscCall(DMPlexLandauPrintNorms(X,0));
    // advance the collision operator one TS solve for this whole batch
    PetscCall(TSSetSolution(ts,X));
    PetscCall(PetscInfo(pack,"Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n",global_batch_id, global_batch_id+ctx->batch_sz));
    PetscCall(TSSolve(ts,X));
    PetscCall(DMPlexLandauPrintNorms(X,1));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    // map back to particles (g2p)
    for (PetscInt b_id_0 = 0 ; b_id_0 < ctx->batch_sz ; b_id_0 += numthreads) {
      PetscCall(PetscInfo(pack,"g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n",global_batch_id+1,NUserV,b_id_0+1,ctx->batch_sz));
      PetscPragmaOMP(parallel for)
        for (int tid=0; tid<numthreads; tid++) {
          const PetscInt b_id = b_id_0 + tid;
          if ((glb_b_id = global_batch_id + b_id) < NUserV) {
            for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { // add same particles for all grids
              PetscErrorCode  ierr_t;
              PetscInfo(pack,"gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n",global_batch_id,b_id,grid,LAND_PACK_IDX(b_id,grid));
              ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id,grid)], globXArray[LAND_PACK_IDX(b_id,grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id,grid)], g_Mass[grid]);
              if (ierr_t) ierr = ierr_t;
            }
          }
        }
      PetscCall(ierr);
      /* Cleanup, and get data: accumulate "after" moments on the target, destroy per-batch objects */
      PetscCall(PetscInfo(pack,"Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n",b_id_0,b_id_0+numthreads));
      for (int tid=0; tid<numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
            PetscDataType dtype;
            PetscReal     *wp,*coords;
            DM            sw = globSwarmArray[LAND_PACK_IDX(b_id,grid)];
            PetscInt      npoints,bs=1;
            PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void**)&wp)); // take data out here
            if (glb_b_id==b_target) {
              PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void**)&coords));
              PetscCall(DMSwarmGetLocalSize(sw,&npoints));
              for (int p=0;p<npoints;p++) {
              PetscReal v2 = 0, fact = dim==2 ? 2.0*PETSC_PI*coords[p*dim+0] : 1; /* axisymmetric 2*pi*r weight in 2D */
              for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p*dim+i]);
              moments_1[0] += fact*wp[p]*ctx->n_0                  *ctx->masses[ctx->species_offset[grid]];
              moments_1[1] += fact*wp[p]*ctx->n_0*ctx->v_0         *ctx->masses[ctx->species_offset[grid]] * coords[p*dim+1]; // z-momentum
              moments_1[2] += fact*wp[p]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ctx->species_offset[grid]] * v2;
              }
              PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void**)&coords));
            }
            PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void**)&wp));
            PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id,grid)]));
            PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(b_id,grid)]));
          }
        }
      }
    } // thread batch
    PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
  } // user batch
  /* Cleanup */
  PetscCall(PetscFree(globXArray));
  PetscCall(PetscFree(globSwarmArray));
  PetscCall(PetscFree(globMpArray));
  // clean up mass matrices and per-thread solvers/vectors
  for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
    PetscCall(MatDestroy(&g_Mass[grid]));
    for (int tid=0; tid<numthreads; tid++) {
      PetscCall(VecDestroy(&t_fhat[grid][tid]));
      PetscCall(KSPDestroy(&t_ksp[grid][tid]));
    }
  }
  /* report conservation: after (before) for each moment, plus relative energy error */
  PetscCall(PetscInfo(X,"Total number density: %20.12e (%20.12e); x-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e) error = %e (log10 of error = %" PetscInt_FMT "), %" PetscInt_FMT " particles. Use %" PetscInt_FMT " threads\n",
                      (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2],  (double)moments_0[2], (double)((moments_1[2]-moments_0[2])/moments_0[2]), (PetscInt)PetscLog10Real(PetscAbsReal((moments_1[2]-moments_0[2])/moments_0[2])), nTargetP, numthreads));
  PetscFunctionReturn(0);
}
458 
459 int main(int argc, char **argv)
460 {
461   DM               pack;
462   Vec              X;
463   PetscInt         dim=2,nvert=1,Np=10,btarget=0,gtarget=0;
464   TS               ts;
465   Mat              J;
466   LandauCtx        *ctx;
467 #if defined(PETSC_USE_LOG)
468   PetscLogStage    stage;
469 #endif
470 
471   PetscCall(PetscInitialize(&argc, &argv, NULL,help));
472   // process args
473   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
474   PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL));
475   PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL));
476   PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL));
477   PetscCall(PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL));
478   PetscCheck(btarget < nvert, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT,btarget,nvert);
479   PetscCall(PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL));
480   PetscOptionsEnd();
481   /* Create a mesh */
482   PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
483   PetscCall(DMSetUp(pack));
484   PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
485   PetscCall(DMGetApplicationContext(pack, &ctx));
486   PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT,gtarget,ctx->num_grids);
487   PetscCheck(nvert >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " should be <= batch size %" PetscInt_FMT,nvert,ctx->batch_sz);
488   /* Create timestepping solver context */
489   PetscCall(TSCreate(PETSC_COMM_SELF,&ts));
490   PetscCall(TSSetDM(ts,pack));
491   PetscCall(TSSetIFunction(ts,NULL,DMPlexLandauIFunction,NULL));
492   PetscCall(TSSetIJacobian(ts,J,J,DMPlexLandauIJacobian,NULL));
493   PetscCall(TSSetExactFinalTime(ts,TS_EXACTFINALTIME_STEPOVER));
494   PetscCall(TSSetFromOptions(ts));
495   PetscCall(PetscObjectSetName((PetscObject)X, "X"));
496   // do particle advance, warmup
497   PetscCall(go(ts,X,nvert,Np,dim,btarget,gtarget));
498   PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
499   // hot
500   PetscCall(PetscLogStageRegister("ex30 hot solve", &stage));
501   PetscCall(PetscLogStagePush(stage));
502   PetscCall(go(ts,X,nvert,Np,dim,btarget,gtarget));
503   PetscCall(PetscLogStagePop());
504   /* clean up */
505   PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
506   PetscCall(TSDestroy(&ts));
507   PetscCall(VecDestroy(&X));
508   PetscCall(PetscFinalize());
509   return 0;
510 }
511 
512 /*TEST
513 
514   build:
515     requires: !complex p4est
516 
517   testset:
518     requires: double defined(PETSC_USE_DMLANDAU_2D)
519     output_file: output/ex30_0.out
520     args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
521           -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
522           -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
523           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
524           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
525           -ksp_type preonly -pc_type lu \
526           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
527           -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
528           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec
529 
530     test:
531       suffix: cpu
532       args: -dm_landau_device_type cpu
533     test:
534       suffix: kokkos
535       requires: kokkos_kernels
536       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
537     test:
538       suffix: cuda
539       requires: cuda
540       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda
541 
542   testset:
543     requires: double !defined(PETSC_USE_DMLANDAU_2D)
544     output_file: output/ex30_3d.out
545     args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
546           -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
547           -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
548           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
549           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
550           -ksp_type preonly -pc_type lu \
551           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
552           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12\
553           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec
554 
555     test:
556       suffix: cpu_3d
557       args: -dm_landau_device_type cpu
558     test:
559       suffix: kokkos_3d
560       requires: kokkos_kernels
561       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
562     test:
563       suffix: cuda_3d
564       requires: cuda
565       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda
566 
567 TEST*/
568