static char help[] = "Grid based Landau collision operator with PIC interface with OpenMP setup. (one species per grid)\n";

/*
  Supports 2.5V with axisymmetric coordinates
  - r,z coordinates
  - Domain and species data input by Landau operator
  - "radius" for each grid, normalized with electron thermal velocity
  - Domain: (0,radius) x (-radius,radius), thus the first coordinate x[0] is the perpendicular velocity and a 2*pi*x[0] factor is added for the axisymmetric volume element
  Supports full 3V
*/

#include "petscdmplex.h"
#include "petscds.h"
#include "petscdmswarm.h"
#include "petscksp.h"
#include <petsc/private/petscimpl.h>
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  #include <omp.h>
#endif
#include <petsclandau.h>
#include <petscdmcomposite.h>

typedef struct {
  Mat MpTrans;
  Mat Mp;
  Vec ff;
  Vec uu;
} MatShellCtx;

PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMult(matshellctx->MpTrans, matshellctx->ff, yy));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
{
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(MtM, &matshellctx));
  PetscCheck(matshellctx, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "No context");
  PetscCall(MatMult(matshellctx->Mp, xx, matshellctx->ff));
  PetscCall(MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
{
  PetscInt Nc = 1;

  PetscFunctionBeginUser;
  PetscCall(DMCreate(PETSC_COMM_SELF, sw));
  PetscCall(DMSetType(*sw, DMSWARM));
  PetscCall(DMSetDimension(*sw, dim));
  PetscCall(DMSwarmSetType(*sw, DMSWARM_PIC));
  PetscCall(DMSwarmSetCellDM(*sw, dm));
  PetscCall(DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR));
  PetscCall(DMSwarmFinalizeFieldRegister(*sw));
  PetscCall(DMSetFromOptions(*sw));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass)
{
  PetscBool    is_lsqr;
  KSP          ksp;
  Mat          PM_p = NULL, MtM, D;
  Vec          ff;
  PetscInt     N, M, nzl;
  MatShellCtx *matshellctx;

  PetscFunctionBeginUser;
  PetscCall(MatMult(Mass, rhs, work));
  PetscCall(VecCopy(work, rhs));
  // pseudo-inverse
  PetscCall(KSPCreate(PETSC_COMM_SELF, &ksp));
  PetscCall(KSPSetOptionsPrefix(ksp, "ftop_"));
  PetscCall(KSPSetFromOptions(ksp));
  PetscCall(PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr));
  if (!is_lsqr) {
    PetscCall(MatGetLocalSize(M_p, &M, &N));
    if (N > M) {
      PC pc;
      PetscCall(PetscInfo(ksp, "M (%" PetscInt_FMT ") < N (%" PetscInt_FMT ") -- revert to lsqr\n", M, N));
      is_lsqr = PETSC_TRUE;
      PetscCall(KSPSetType(ksp, KSPLSQR));
      PetscCall(KSPGetPC(ksp, &pc));
      PetscCall(PCSetType(pc, PCNONE)); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
    } else {
      PetscCall(PetscNew(&matshellctx));
      PetscCall(MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM));
      PetscCall(MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans));
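      /* Matrix-free normal equations: MtM applies Mp^T Mp through the shell operations below,
         and the diagonal matrix D (diagonal of Mp^T Mp, built from the row sums of squares of
         Mp^T) is used as its preconditioner. */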
      matshellctx->Mp = M_p;
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ));
      PetscCall(MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ));
      PetscCall(MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff));
      PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D));
      PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view"));
      for (int i = 0; i < N; i++) {
        const PetscScalar *vals;
        const PetscInt    *cols;
        PetscScalar        dot = 0;
        PetscCall(MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals));
        for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
        PetscCheck(dot != 0.0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Row %d is empty", i);
        PetscCall(MatSetValue(D, i, i, dot, INSERT_VALUES));
      }
      PetscCall(MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY));
      PetscCall(PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl));
      PetscCall(KSPSetOperators(ksp, MtM, D));
      PetscCall(MatViewFromOptions(D, NULL, "-ftop2_D_mat_view"));
      PetscCall(MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view"));
      PetscCall(MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view"));
    }
  }
  if (is_lsqr) {
    PC        pc;
    PetscBool is_bjac;
    PetscCall(KSPGetPC(ksp, &pc));
    PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac));
    if (is_bjac) {
      PetscCall(DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p));
      PetscCall(KSPSetOperators(ksp, M_p, PM_p));
    } else {
      PetscCall(KSPSetOperators(ksp, M_p, M_p));
    }
  }
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff)); // this grabs access
  if (!is_lsqr) {
    PetscCall(KSPSolve(ksp, rhs, matshellctx->uu));
    PetscCall(MatMult(M_p, matshellctx->uu, ff));
    PetscCall(MatDestroy(&matshellctx->MpTrans));
    PetscCall(VecDestroy(&matshellctx->ff));
    PetscCall(VecDestroy(&matshellctx->uu));
    PetscCall(MatDestroy(&D));
    PetscCall(MatDestroy(&MtM));
    PetscCall(PetscFree(matshellctx));
  } else {
    PetscCall(KSPSolveTranspose(ksp, rhs, ff));
  }
  PetscCall(KSPDestroy(&ksp));
  /* Visualize particle field */
  PetscCall(VecViewFromOptions(ff, NULL, "-weights_view"));
  PetscCall(MatDestroy(&PM_p));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
{
  PetscBool     removePoints = PETSC_TRUE;
  PetscReal    *wq, *coords;
  PetscDataType dtype;
  Mat           M_p;
  Vec           ff;
  PetscInt      bs, p, zero = 0;

  PetscFunctionBeginUser;
  PetscCall(DMSwarmSetLocalSizes(sw, Np, zero));
  PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  for (p = 0; p < Np; p++) {
    coords[p * dim + 0] = xx[p];
    coords[p * dim + 1] = yy[p];
    wq[p]               = a_wp[p];
    if (dim == 3) coords[p * dim + 2] = zz[p];
  }
  PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords));
  PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq));
  PetscCall(DMSwarmMigrate(sw, removePoints));
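  /* Locate each particle in a cell of the background DM (migrating it to the owning rank in
     parallel) so the particle mass matrix assembled below has a well-defined sparsity pattern. */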
  PetscCall(PetscObjectSetName((PetscObject)sw, "Particle Grid"));

  /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
  PetscCall(DMCreateMassMatrix(sw, dm, &M_p));

  PetscCall(PetscObjectSetName((PetscObject)rho, "rho"));
  PetscCall(DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff));
  PetscCall(PetscObjectSetName((PetscObject)ff, "weights"));
  PetscCall(MatMultTranspose(M_p, ff, rho));
  PetscCall(DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff));

  // output
  *Mp_out = M_p;

  PetscFunctionReturn(PETSC_SUCCESS);
}

static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
{
  PetscInt  i;
  PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */

  /* compute the exponents, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian */
  u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
}

#define MAX_NUM_THRDS 12
PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target)
{
  DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
  Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
  KSP            t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
  Vec            t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
  PetscInt       nDMs, glb_b_id, nTargetP = 0;
  PetscErrorCode ierr = 0;
#if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
  PetscInt numthreads = PetscNumOMPThreads;
#else
  PetscInt numthreads = 1;
#endif
  LandauCtx *ctx;
  Vec       *globXArray;
  PetscReal  moments_0[3], moments_1[3], dt_init;

  PetscFunctionBeginUser;
  PetscCheck(numthreads <= MAX_NUM_THRDS, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Too many threads %" PetscInt_FMT " > %d", numthreads, MAX_NUM_THRDS);
  PetscCheck(numthreads > 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of threads %" PetscInt_FMT " must be positive", numthreads);
  PetscCall(TSGetDM(ts, &pack));
  PetscCall(DMGetApplicationContext(pack, &ctx));
  PetscCheck(ctx->batch_sz % numthreads == 0, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "batch size (-dm_landau_batch_size) %" PetscInt_FMT " mod #threads %" PetscInt_FMT " must equal zero", ctx->batch_sz, numthreads);
  PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
  PetscCall(PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices)\n", ctx->num_grids, ctx->batch_sz, NUserV));
  PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
  PetscCall(PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray));
  PetscCall(PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray));
  PetscCall(DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view"));
  // create mass matrices
  PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); // just to duplicate
  for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // one FE mass matrix per grid
    Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
    DM           dm   = ctx->plex[grid];
    PetscSection s;
    grid_dm[grid] = dm;
    PetscCall(DMCreateMassMatrix(dm, dm, &g_Mass[grid]));
    PetscCall(DMGetLocalSection(dm, &s));
    PetscCall(DMPlexCreateClosureIndex(dm, s));
    for (int tid = 0; tid < numthreads; tid++) {
      PetscCall(VecDuplicate(subX, &t_fhat[grid][tid]));
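      /* One work vector and one mass-matrix KSP ("ptof_" prefix) per (grid, thread) pair, so the
         OpenMP particle-to-grid and grid-to-particle loops below share no solver state. */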
      PetscCall(KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]));
      PetscCall(KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_"));
      PetscCall(KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]));
      PetscCall(KSPSetFromOptions(t_ksp[grid][tid]));
    }
  }
  PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
  // create particle raw data; could use OpenMP with a thread-safe malloc, but this is just the fake user
  for (int i = 0; i < 3; i++) moments_0[i] = moments_1[i] = 0;
  PetscCall(TSGetTimeStep(ts, &dt_init)); // we could have an adaptive time stepper
  for (PetscInt global_batch_id = 0; global_batch_id < NUserV; global_batch_id += ctx->batch_sz) {
    PetscCall(TSSetTime(ts, 0));
    PetscCall(TSSetStepNumber(ts, 0));
    PetscCall(TSSetTimeStep(ts, dt_init));
    PetscCall(VecZeroEntries(X));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], "rho"));
    // create fake particles
    for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
      PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      PetscInt   Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
      // make particles
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {          // the ragged edge of the last batch
          PetscInt Npp0 = a_Np + (glb_b_id % a_Np), NN;               // fake user: number of particles per dimension, with some (< 2x) load imbalance across vertices
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {    // add the same particles to all grids
            const PetscReal kT_m = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */
            PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
            PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
            if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
            else Npi = Npj = Npk = Npp0;
            // User: use glb_b_id to index into your data
            NN = Npi * Npj * Npk; // make a regular lattice of Npi x Npj x Npk particles
            if (glb_b_id == b_target) {
              nTargetP = NN;
              PetscCall(PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particles\n", glb_b_id, NN));
            }
            Np_t[grid][tid] = NN;
            PetscCall(PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]));
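            /* hp is the spacing of the cell-centered particle lattice; each particle weight below
               is the Maxwellian evaluated at the particle, scaled by the lattice cell volume. */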
            hp[0] = (hi[0] - lo[0]) / Npi;
            hp[1] = (hi[1] - lo[1]) / Npj;
            hp[2] = (hi[2] - lo[2]) / Npk;
            if (dim == 2) hp[2] = 1;
            PetscCall(PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g;\n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m)); // temp
            vole = hp[0] * hp[1] * hp[2] * ctx->n[grid]; // fix for multi-species
            PetscCall(PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_b_id, grid, NN, b_target));
            for (int pj = 0, pp = 0; pj < Npj; pj++) {
              for (int pk = 0; pk < Npk; pk++) {
                for (int pi = 0; pi < Npi; pi++, pp++) {
                  xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
                  yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
                  if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
                  {
                    PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
                    maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
                    //PetscCall(PetscInfo(pack, "%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim == 2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp])); // temp
                    if (glb_b_id == b_target) {
                      PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1;
                      for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
                      moments_0[0] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
                      moments_0[1] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
                      moments_0[2] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
                    }
                  }
                }
              }
            }
          } // grid
        }   // active
      }     // fake threads
      /* Create particle swarm */
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
          //PetscCall(PetscInfo(pack, "Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n", glb_b_id));
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // one swarm per (vertex, grid) pair
            PetscErrorCode ierr_t;
            PetscSection   section;
            PetscInt       Nf;
            DM             dm = grid_dm[grid];
            ierr_t = DMGetLocalSection(dm, &section);
            ierr_t = PetscSectionGetNumFields(section, &Nf);
            if (Nf != 1) ierr_t = 9999;
            else {
              ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
              ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", b_id, grid, LAND_PACK_IDX(b_id, grid));
              ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
            }
            if (ierr_t) ierr = ierr_t;
          }
        } // active
      }
      PetscCheck(ierr != 9999, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Only support one species per grid");
      PetscCall(ierr);
      // p --> g: make globMpArray & set X
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // deposit particles on each grid
            PetscErrorCode ierr_t;
            DM             dm   = grid_dm[grid];
            DM             sw   = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
            Vec            subX = globXArray[LAND_PACK_IDX(b_id, grid)], work = t_fhat[grid][tid];
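            /* Deposit the particles onto the grid (subX = Mp^T w), then invert the finite element
               mass matrix so subX holds FE coefficients: u = M^{-1} Mp^T w. */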
            PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") for local batch %" PetscInt_FMT "\n", global_batch_id, grid, LAND_PACK_IDX(b_id, grid));
            ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id, grid)]);
            if (ierr_t) ierr = ierr_t;
            // u = M^-1 f_w
            ierr_t = VecCopy(subX, work);
            ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
            if (ierr_t) ierr = ierr_t;
          }
        }
      }
      PetscCall(ierr);
      /* Cleanup */
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          PetscCall(PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_b_id + 1, NUserV));
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
            PetscCall(PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]));
          }
        } // active
      }
    } // Landau
    if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscCall(VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view"));
    PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray));
    PetscCall(DMPlexLandauPrintNorms(X, 0));
    // advance
    PetscCall(TSSetSolution(ts, X));
    PetscCall(PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_batch_id, global_batch_id + ctx->batch_sz));
    PetscCall(TSSolve(ts, X));
    PetscCall(DMPlexLandauPrintNorms(X, 1));
    PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray));
    // map back to particles
    for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
      PetscCall(PetscInfo(pack, "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_batch_id + 1, NUserV, b_id_0 + 1, ctx->batch_sz));
      PetscPragmaOMP(parallel for)
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // map each grid's solution back to its particles
            PetscErrorCode ierr_t;
            PetscInfo(pack, "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_batch_id, b_id, grid, LAND_PACK_IDX(b_id, grid));
            ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id, grid)], globXArray[LAND_PACK_IDX(b_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id, grid)], g_Mass[grid]);
            if (ierr_t) ierr = ierr_t;
          }
        }
      }
      PetscCall(ierr);
      /* Cleanup, and get data */
      PetscCall(PetscInfo(pack, "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", b_id_0, b_id_0 + numthreads));
      for (int tid = 0; tid < numthreads; tid++) {
        const PetscInt b_id = b_id_0 + tid;
        if ((glb_b_id = global_batch_id + b_id) < NUserV) {
          for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
            PetscDataType dtype;
            PetscReal    *wp, *coords;
            DM            sw = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
            PetscInt      npoints, bs = 1;
            PetscCall(DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp)); // take data out here
            if (glb_b_id == b_target) {
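              /* Diagnostics for the target vertex: accumulate density, z-momentum, and energy
                 moments of the updated particle weights (with the 2*pi*r axisymmetric volume
                 factor in 2D) to compare against the initial moments. */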
PetscCall(DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords)); 424 PetscCall(DMSwarmGetLocalSize(sw, &npoints)); 425 for (int p = 0; p < npoints; p++) { 426 PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1; 427 for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]); 428 moments_1[0] += fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]]; 429 moments_1[1] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * coords[p * dim + 1]; // z-momentum 430 moments_1[2] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2; 431 } 432 PetscCall(DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords)); 433 } 434 PetscCall(DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp)); 435 PetscCall(DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id, grid)])); 436 PetscCall(MatDestroy(&globMpArray[LAND_PACK_IDX(b_id, grid)])); 437 } 438 } 439 } 440 } // thread batch 441 PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray)); 442 } // user batch 443 /* Cleanup */ 444 PetscCall(PetscFree(globXArray)); 445 PetscCall(PetscFree(globSwarmArray)); 446 PetscCall(PetscFree(globMpArray)); 447 // clean up mass matrices 448 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particels for all grids 449 PetscCall(MatDestroy(&g_Mass[grid])); 450 for (int tid = 0; tid < numthreads; tid++) { 451 PetscCall(VecDestroy(&t_fhat[grid][tid])); 452 PetscCall(KSPDestroy(&t_ksp[grid][tid])); 453 } 454 } 455 PetscCall(PetscInfo(X, "Total number density: %20.12e (%20.12e); x-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e) error = %e (log10 of error = %" PetscInt_FMT "), %" PetscInt_FMT " particles. Use %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)moments_0[2], (double)((moments_1[2] - moments_0[2]) / moments_0[2]), (PetscInt)PetscLog10Real(PetscAbsReal((moments_1[2] - moments_0[2]) / moments_0[2])), nTargetP, numthreads)); 456 PetscFunctionReturn(PETSC_SUCCESS); 457 } 458 459 int main(int argc, char **argv) 460 { 461 DM pack; 462 Vec X; 463 PetscInt dim = 2, nvert = 1, Np = 10, btarget = 0, gtarget = 0; 464 TS ts; 465 Mat J; 466 LandauCtx *ctx; 467 #if defined(PETSC_USE_LOG) 468 PetscLogStage stage; 469 #endif 470 471 PetscFunctionBeginUser; 472 PetscCall(PetscInitialize(&argc, &argv, NULL, help)); 473 // process args 474 PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX"); 475 PetscCall(PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL)); 476 PetscCall(PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL)); 477 PetscCall(PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL)); 478 PetscCall(PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL)); 479 PetscCheck(btarget < nvert, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Batch to view %" PetscInt_FMT " should be < number of vertices %" PetscInt_FMT, btarget, nvert); 480 PetscCall(PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, >arget, NULL)); 481 PetscOptionsEnd(); 482 /* Create a mesh */ 483 
  PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack));
  PetscCall(DMSetUp(pack));
  PetscCall(DMSetOutputSequenceNumber(pack, 0, 0.0));
  PetscCall(DMGetApplicationContext(pack, &ctx));
  PetscCheck(gtarget < ctx->num_grids, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Grid to view %" PetscInt_FMT " should be < number of grids %" PetscInt_FMT, gtarget, ctx->num_grids);
  PetscCheck(nvert >= ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_ARG_OUTOFRANGE, "Number of vertices %" PetscInt_FMT " should be >= batch size %" PetscInt_FMT, nvert, ctx->batch_sz);
  /* Create timestepping solver context */
  PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
  PetscCall(TSSetDM(ts, pack));
  PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
  PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
  PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
  PetscCall(TSSetFromOptions(ts));
  PetscCall(PetscObjectSetName((PetscObject)X, "X"));
  // do particle advance, warmup
  PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
  PetscCall(MatZeroEntries(J)); // need to zero out so as to not reuse it in Landau's logic
  // hot
  PetscCall(PetscLogStageRegister("ex30 hot solve", &stage));
  PetscCall(PetscLogStagePush(stage));
  PetscCall(go(ts, X, nvert, Np, dim, btarget, gtarget));
  PetscCall(PetscLogStagePop());
  /* clean up */
  PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
  PetscCall(TSDestroy(&ts));
  PetscCall(VecDestroy(&X));
  PetscCall(PetscFinalize());
  return 0;
}

/*TEST

  build:
    requires: !complex p4est

  testset:
    requires: double defined(PETSC_USE_DMLANDAU_2D)
    output_file: output/ex30_0.out
    args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
          -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
          -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
          -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
          -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
          -ksp_type preonly -pc_type lu \
          -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12 \
          -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14 \
          -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

    test:
      suffix: cpu
      args: -dm_landau_device_type cpu
    test:
      suffix: kokkos
      requires: kokkos_kernels
      args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
    test:
      suffix: cuda
      requires: cuda
      args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

  testset:
    requires: double !defined(PETSC_USE_DMLANDAU_2D)
    output_file: output/ex30_3d.out
    args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
          -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
          -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
          -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
          -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
          -ksp_type preonly -pc_type lu \
          -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12 \
          -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12 \
          -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

    test:
      suffix: cpu_3d
      args: -dm_landau_device_type cpu
    test:
      suffix: kokkos_3d
      requires: kokkos_kernels
      args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
    test:
      suffix: cuda_3d
      requires: cuda
      args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

TEST*/