#include <../src/mat/impls/aij/seq/aij.h>
#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
#include <petsclandau.h>              /*I "petsclandau.h" I*/
#include <petscts.h>
#include <petscdmforest.h>
#include <petscdmcomposite.h>

/* Landau collision operator */

/* relativistic terms */
#if defined(PETSC_USE_REAL_SINGLE)
  #define SPEED_OF_LIGHT 2.99792458e8F
  #define C_0(v0) (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
#else
  #define SPEED_OF_LIGHT 2.99792458e8
  #define C_0(v0) (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
#endif

#include "land_tensors.h"

#if defined(PETSC_HAVE_OPENMP)
  #include <omp.h>
#endif

static PetscErrorCode LandauGPUMapsDestroy(void **ptr)
{
  P4estVertexMaps *maps = (P4estVertexMaps *)*ptr;

  PetscFunctionBegin;
  // free device data
  if (maps[0].deviceType != LANDAU_CPU) {
#if defined(PETSC_HAVE_KOKKOS)
    if (maps[0].deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // implies Kokkos does
#endif
  }
  // free host data
  for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
    PetscCall(PetscFree(maps[grid].c_maps));
    PetscCall(PetscFree(maps[grid].gIdx));
  }
  PetscCall(PetscFree(maps));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal v2 = 0;

  PetscFunctionBegin;
  /* compute v^2 / 2 */
  for (PetscInt i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian */
  u[0] = v2 / 2;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* needs double */
static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal *c2_0_arr = ((PetscReal *)actx);
  double u2 = 0, c02 = (double)*c2_0_arr, xx;

  PetscFunctionBegin;
  /* compute u^2 / 2 */
  for (PetscInt i = 0; i < dim; ++i) u2 += x[i] * x[i];
  /* gamma - 1 = g_eps, for conditioning and we only take derivatives */
  xx = u2 / c02;
#if defined(PETSC_USE_DEBUG)
  u[0] = PetscSqrtReal(1. + xx);
#else
  u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
  LandauFormJacobian_Internal - Evaluates Jacobian matrix.

  Input Parameters:
  .  globX - input vector
  .  actx - optional user-defined context
  .  dim - dimension

  Output Parameter:
  .  JacP - Jacobian matrix filled, not created
*/
static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
{
  LandauCtx *ctx = (LandauCtx *)a_ctx;
  PetscInt numCells[LANDAU_MAX_GRIDS], Nq, Nb;
  PetscQuadrature quad;
  PetscReal Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
  PetscScalar *cellClosure = NULL;
  const PetscScalar *xdata = NULL;
  PetscDS prob;
  PetscContainer container;
  P4estVertexMaps *maps;
  Mat subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];

  PetscFunctionBegin;
  PetscValidHeaderSpecific(a_X, VEC_CLASSID, 1);
  PetscValidHeaderSpecific(JacP, MAT_CLASSID, 2);
  PetscAssertPointer(ctx, 5);
  /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
  PetscCheck(ctx->plex[0] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
  PetscCall(PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0));
  PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids
  PetscCall(PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container));
  if (container) {
    PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "maps but no GPU assembly");
    PetscCall(PetscContainerGetPointer(container, (void **)&maps));
    PetscCheck(maps, ctx->comm, PETSC_ERR_ARG_WRONG, "empty GPU matrix container");
    for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
  } else {
    PetscCheck(!ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "No maps but GPU assembly");
    for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)]));
    }
    maps = NULL;
  }
  // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
  PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad));
  PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
  PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
  PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
  PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
  // get metadata for collecting dynamic data
  for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
    PetscInt cStart, cEnd;
    PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created");
    PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
    numCells[grid] = cEnd - cStart; // grids can have different topology
  }
  PetscCall(PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0));
  if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
    DM pack;
    PetscCall(VecGetDM(a_X, &pack));
    PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
    PetscCall(PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0));
    for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
      Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
      if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
    }
    if (!ctx->gpu_assembly) {
      Vec *locXArray, *globXArray;
      PetscScalar *cellClosure_it;
      PetscInt cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
      PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
        PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
        PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
        PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
      }
      /* count cellClosure size */
      PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
      for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
      PetscCall(PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure));
      cellClosure_it = cellClosure;
      PetscCall(PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray));
      PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray));
      PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray));
      for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          Vec locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
          PetscInt cStart, cEnd, ei;
          PetscCall(VecDuplicate(locX, &locX2));
          PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd));
          for (ei = cStart; ei < cEnd; ++ei) {
            PetscScalar *coef = NULL;
            PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            PetscCall(PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */
            PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef));
            cellClosure_it += Nb * Nf[grid];
          }
          PetscCall(VecDestroy(&locX2));
        }
      }
      PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT, cellClosure_it - cellClosure, cellClosure_sz * ctx->batch_sz);
      PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray));
      PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray));
      PetscCall(PetscFree(locXArray));
      PetscCall(PetscFree(globXArray));
      xdata = NULL;
    } else {
      PetscMemType mtype;
      if (ctx->jacobian_field_major_order) { // get data in batch ordering
        PetscCall(VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
        PetscCall(VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD));
        PetscCall(VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype));
      } else {
        PetscCall(VecGetArrayReadAndMemType(a_X, &xdata, &mtype));
      }
      PetscCheck(mtype == PETSC_MEMTYPE_HOST || ctx->deviceType != LANDAU_CPU, ctx->comm, PETSC_ERR_ARG_WRONG, "CPU run with device data: use -mat_type aij");
      cellClosure = NULL;
    }
    PetscCall(PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0));
  } else xdata = cellClosure = NULL;

  /* do it */
  if (ctx->deviceType == LANDAU_KOKKOS) {
#if defined(PETSC_HAVE_KOKKOS)
    PetscCall(LandauKokkosJacobian(ctx->plex, Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP));
#else
    SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
#endif
  } else { /* CPU version */
    PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
    PetscInt ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
    PetscReal *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
    PetscReal *nu_alpha = (PetscReal *)ctx->SData_d.alpha, *nu_beta = (PetscReal *)ctx->SData_d.beta, *invMass = (PetscReal *)ctx->SData_d.invMass;
    PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas;
    PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
    PetscScalar *coo_vals = NULL;
    for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
      PetscCall(DMGetLocalSection(ctx->plex[grid], &section[grid]));
      PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid]));
      PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid]));
    }
    /* count IPf size, etc */
    PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids
    const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
    ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
    for (PetscInt grid = 0; grid < num_grids; grid++) {
      PetscInt nfloc = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
      elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
      ip_offset[grid + 1] = ip_offset[grid] + numCells[grid] * Nq;
      ipf_offset[grid + 1] = ipf_offset[grid] + Nq * nfloc * numCells[grid];
    }
    IPf_sz_glb = ipf_offset[num_grids];
    IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
    // prep COO
    PetscCall(PetscMalloc1(ctx->SData_d.coo_size, &coo_vals)); // allocate every time?
    if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      PetscCall(PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0));
      PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, (dim == 3 ? IPf_sz_tot : 0), &dudz));
      // F df/dx
      for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
        const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
        // find my grid:
        PetscInt grid = 0;
        while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid
        {
          const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
          const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
          PetscScalar *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQND];
          PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data on batch 0
          PetscInt b, f, q;
          if (cellClosure) {
            coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
          } else {
            coef = coef_buff;
            for (f = 0; f < loc_Nf; ++f) {
              LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
              for (b = 0; b < Nb; ++b) {
                PetscInt idx = Idxs[b];
                if (idx >= 0) {
                  coef[f * Nb + b] = xdata[idx + moffset];
                } else {
                  idx = -idx - 1;
                  coef[f * Nb + b] = 0;
                  for (q = 0; q < maps[grid].num_face; q++) {
                    PetscInt id = maps[grid].c_maps[idx][q].gid;
                    PetscScalar scale = maps[grid].c_maps[idx][q].scale;
                    coef[f * Nb + b] += scale * xdata[id + moffset];
                  }
                }
              }
            }
          }
          /* get f and df */
          for (PetscInt qi = 0; qi < Nq; qi++) {
            const PetscReal *invJ = &invJe[qi * dim * dim];
            const PetscReal *Bq = &BB[qi * Nb];
            const PetscReal *Dq = &DD[qi * Nb * dim];
            PetscReal u_x[LANDAU_DIM];
            /* get f & df */
            for (f = 0; f < loc_Nf; ++f) {
              const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
              PetscInt b, e;
              PetscReal refSpaceDer[LANDAU_DIM];
              ff[idx] = 0.0;
              for (PetscInt d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
              for (b = 0; b < Nb; ++b) {
                const PetscInt cidx = b;
                ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
                for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
              }
              for (PetscInt d = 0; d < LANDAU_DIM; ++d) {
                for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
              }
              dudx[idx] = u_x[0];
              dudy[idx] = u_x[1];
#if LANDAU_DIM == 3
              dudz[idx] = u_x[2];
#endif
            }
          } // q
        } // grid
      } // grid*batch
      PetscCall(PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0));
#if defined(PETSC_HAVE_THREADSAFETY)
      endtime = MPI_Wtime();
      if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
#endif
    } // Jacobian setup
    // assemble Jacobian (or mass)
    for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
      const PetscInt b_Nelem = elem_offset[num_grids];
      const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
      PetscInt grid = 0;
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
      {
        const PetscInt loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
        const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
        PetscScalar *elemMat;
        const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
        PetscCall(PetscMalloc1(elemMatSize, &elemMat));
        PetscCall(PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat)));
        if (shift == 0.0) { // Jacobian
          PetscCall(PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0));
        } else { // mass
          PetscCall(PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0));
        }
        for (PetscInt qj = 0; qj < Nq; ++qj) {
          const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
          PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
          PetscInt d, d2, dp, d3, IPf_idx;
          if (shift == 0.0) { // Jacobian
            const PetscReal *const invJj = &invJe[qj * dim * dim];
            PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
            const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
            // create g2 & g3
            for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
              gg2_temp[d] = 0;
              for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
            }
            /* inner beta reduction */
            IPf_idx = 0;
            for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
              PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
              for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
                for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
                  const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
                  PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM == 2
                  PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
                  PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  if (ctx->use_relativistic_corrections) {
                    LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
                  } else {
                    LandauTensor3D(vj, x, y, z, U, mask);
                  }
#endif
                  for (PetscInt f = 0; f < Nfloc_r; ++f) {
                    const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
                    temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
                    temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
#if LANDAU_DIM == 3
                    temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off] * (*lambdas)[grid][grid_r];
#endif
                    temp2 += ff[idx] * nu_beta[f + f_off] * (*lambdas)[grid][grid_r];
                  }
                  temp1[0] *= wi;
                  temp1[1] *= wi;
#if LANDAU_DIM == 3
                  temp1[2] *= wi;
#endif
                  temp2 *= wi;
#if LANDAU_DIM == 2
                  for (d2 = 0; d2 < 2; d2++) {
                    for (d3 = 0; d3 < 2; ++d3) {
                      /* K = U * grad(f): g2=e: i,A */
                      gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
                      /* D = -U * (I \kron (fx)): g3=f: i,j,A */
                      gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
                    }
                  }
#else
                  for (d2 = 0; d2 < 3; ++d2) {
                    for (d3 = 0; d3 < 3; ++d3) {
                      /* K = U * grad(f): g2 = e: i,A */
                      gg2_temp[d2] += U[d2][d3] * temp1[d3];
                      /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
                      gg3_temp[d2][d3] += U[d2][d3] * temp2;
                    }
                  }
#endif
                } // qi
              } // ei_r
              IPf_idx += nip_loc_r * Nfloc_r;
            } /* grid_r - IPs */
            PetscCheck(IPf_idx == IPf_sz_glb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT, IPf_idx, IPf_sz_glb);
            // add alpha and put in gg2/3
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
              for (d2 = 0; d2 < LANDAU_DIM; d2++) {
                gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
                for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off];
              }
            }
            /* add electric field term once per IP */
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off];
            /* Jacobian transform - g2, g3 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              for (d = 0; d < dim; ++d) {
                g2[fieldA][d] = 0.0;
                for (d2 = 0; d2 < dim; ++d2) {
                  g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
                  g3[fieldA][d][d2] = 0.0;
                  for (d3 = 0; d3 < dim; ++d3) {
                    for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp];
                  }
                  g3[fieldA][d][d2] *= wj;
                }
                g2[fieldA][d] *= wj;
              }
            }
          } else { // mass
            PetscReal wj = ww[jpidx_glb];
            /* Jacobian transform - g0 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              if (dim == 2) {
                g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
              } else {
                g0[fieldA] = wj * shift; // move this to below and remove g0
              }
            }
          }
          /* FE matrix construction */
          {
            PetscInt fieldA, d, f, d2, g;
            const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
            /* assemble - on the diagonal (I,I) */
            for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
              for (f = 0; f < Nb; f++) {
                const PetscInt i = fieldA * Nb + f; /* Element matrix row */
                for (g = 0; g < Nb; ++g) {
                  const PetscInt j = fieldA * Nb + g; /* Element matrix column */
                  const PetscInt fOff = i * totDim + j;
                  if (shift == 0.0) {
                    for (d = 0; d < dim; ++d) {
                      elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
                      for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2];
                    }
                  } else { // mass
                    elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
                  }
                }
              }
            }
          }
        } /* qj loop */
        if (shift == 0.0) { // Jacobian
          PetscCall(PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0));
        } else {
          PetscCall(PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0));
        }
#if defined(PETSC_HAVE_THREADSAFETY)
        endtime = MPI_Wtime();
        if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
#endif
        /* assemble matrix */
        if (!container) {
          PetscInt cStart;
          PetscCall(PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0));
          PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL));
          PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES));
          PetscCall(PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0));
        } else { // GPU like assembly for debugging
          PetscInt fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
          PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
          LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets;
          /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
          for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
            LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
            for (f = 0; f < Nb; f++) {
              PetscInt idx = Idxs[f];
              if (idx >= 0) {
                nr = 1;
                rows0[0] = idx;
                row_scale[0] = 1.;
              } else {
                idx = -idx - 1;
                for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
                  if (maps[grid].c_maps[idx][q].gid < 0) break;
                  rows0[q] = maps[grid].c_maps[idx][q].gid;
                  row_scale[q] = maps[grid].c_maps[idx][q].scale;
                }
              }
              for (g = 0; g < Nb; ++g) {
                idx = Idxs[g];
                if (idx >= 0) {
                  nc = 1;
                  cols0[0] = idx;
                  col_scale[0] = 1.;
                } else {
                  idx = -idx - 1;
                  nc = maps[grid].num_face;
                  for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
                    if (maps[grid].c_maps[idx][q].gid < 0) break;
                    cols0[q] = maps[grid].c_maps[idx][q].gid;
                    col_scale[q] = maps[grid].c_maps[idx][q].scale;
                  }
                }
                const PetscInt i = fieldA * Nb + f; /* Element matrix row */
                const PetscInt j = fieldA * Nb + g; /* Element matrix column */
                const PetscScalar Aij = elemMat[i * totDim + j];
                if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
                  const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
                  const PetscInt idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
                  for (PetscInt q = 0, idx2 = idx0; q < nr; q++) {
                    for (PetscInt d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij;
                  }
                } else {
                  for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
                  for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
                  for (q = 0; q < nr; q++) {
                    for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij;
                  }
                  PetscCall(MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES));
                }
              }
            }
          }
        }
        if (loc_elem == -1) {
          PetscCall(PetscPrintf(ctx->comm, "CPU Element matrix\n"));
          for (PetscInt d = 0; d < totDim; ++d) {
            for (PetscInt f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f])));
            PetscCall(PetscPrintf(ctx->comm, "\n"));
          }
          exit(12);
        }
        PetscCall(PetscFree(elemMat));
      } /* grid */
    } /* outer element & batch loop */
    if (shift == 0.0) { // Jacobian path: free the f, df work arrays allocated above
      PetscCall(PetscFree4(ff, dudx, dudy, dudz));
    }
    if (!container) { // 'CPU' assembly move nest matrix to global JacP
      for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
        for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
          const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
          PetscInt nloc, nzl, colbuf[1024], row;
          const PetscInt *cols;
          const PetscScalar *vals;
          Mat B = subJ[LAND_PACK_IDX(b_id, grid)];
          PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
          PetscCall(MatGetSize(B, &nloc, NULL));
          for (PetscInt i = 0; i < nloc; i++) {
            PetscCall(MatGetRow(B, i, &nzl, &cols, &vals));
            PetscCheck(nzl <= 1024, PetscObjectComm((PetscObject)B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT, nzl);
            for (PetscInt j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
            row = moffset + i;
            PetscCall(MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES));
            PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals));
          }
          PetscCall(MatDestroy(&B));
        }
      }
    }
    if (coo_vals) {
      PetscCall(MatSetValuesCOO(JacP, coo_vals, ADD_VALUES));
      PetscCall(PetscFree(coo_vals));
    }
  } /* CPU version */
  PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY));
  /* clean up */
  if (cellClosure) PetscCall(PetscFree(cellClosure));
  if (xdata) PetscCall(VecRestoreArrayReadAndMemType(a_X, &xdata));
  PetscFunctionReturn(PETSC_SUCCESS);
}
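/*
  Note (informal sketch, not library documentation): for shift == 0 the CPU kernel above
  accumulates an element matrix of the schematic form

    J_{fg} += sum_q [ grad(phi_f) . g2 * phi_g + grad(phi_f) . g3 . grad(phi_g) ],

  where the quadrature weight w_q has already been folded into the vector g2 and tensor g3,
  which hold the Landau friction (K) and diffusion (D) contributions reduced over all
  grids/species in the inner ipidx loop, plus the E_z drive term added to g2. For
  shift != 0 only the scaled mass term shift * w_q * phi_f * phi_g (times 2*pi in 2D)
  is assembled.
*/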
static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
{
  PetscReal r = abc[0], z = abc[1];
  LandauCtx *ctx = (LandauCtx *)a_ctx;

  PetscFunctionBegin;
  if (ctx->sphere && dim == 3) { // make sphere: works for one AMR and Q2
    PetscInt nzero = 0, idx = 0;
    xyz[0] = r;
    xyz[1] = z;
    xyz[2] = abc[2];
    for (PetscInt i = 0; i < 3; i++) {
      if (PetscAbs(xyz[i]) < PETSC_SQRT_MACHINE_EPSILON) nzero++;
      else idx = i;
    }
    if (nzero == 2) xyz[idx] *= 1.732050807568877; // sqrt(3)
    else if (nzero == 1) {
      for (PetscInt i = 0; i < 3; i++) xyz[i] *= 1.224744871391589; // sqrt(3/2)
    }
  } else {
    xyz[0] = r;
    xyz[1] = z;
    if (dim == 3) xyz[2] = abc[2];
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* create DMComposite of meshes for each species group */
static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
{
  PetscFunctionBegin;
  { /* p4est, quads */
    /* Create plex mesh of Landau domain */
    for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
      PetscReal par_radius = ctx->radius_par[grid], perp_radius = ctx->radius_perp[grid];
      if (!ctx->sphere && !ctx->simplex) { // 2 or 3D (only 3D option)
        PetscReal lo[] = {-perp_radius, -par_radius, -par_radius}, hi[] = {perp_radius, par_radius, par_radius};
        DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim == 2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
        if (dim == 2) lo[0] = 0;
        else {
          lo[1] = -perp_radius;
          hi[1] = perp_radius; // 3D y is a perp
        }
        PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, 0, PETSC_TRUE, &ctx->plex[grid])); // TODO: make composite and create dm[grid] here
        PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */
        if (dim == 3) PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cube"));
        else PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane"));
      } else if (dim == 2) {
        size_t len;
        PetscCall(PetscStrlen(ctx->filename, &len));
        if (len) {
          Vec coords;
          PetscScalar *x;
          PetscInt N;
          char str[] = "-dm_landau_view_file_0";
          str[21] += grid;
          PetscCall(DMPlexCreateFromFile(comm_self, ctx->filename, "plexland.c", PETSC_TRUE, &ctx->plex[grid]));
          PetscCall(DMPlexOrient(ctx->plex[grid]));
          PetscCall(DMGetCoordinatesLocal(ctx->plex[grid], &coords));
          PetscCall(VecGetSize(coords, &N));
          PetscCall(VecGetArray(coords, &x));
          /* scale by domain size */
          for (PetscInt i = 0; i < N; i += 2) {
            x[i + 0] *= ctx->radius_perp[grid];
            x[i + 1] *= ctx->radius_par[grid];
          }
          PetscCall(VecRestoreArray(coords, &x));
          PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], ctx->filename));
          PetscCall(PetscInfo(ctx->plex[grid], "%" PetscInt_FMT ") Read %s mesh file (%s)\n", grid, ctx->filename, str));
          PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, str));
        } else { // simplex forces a sphere
          PetscInt numCells = ctx->simplex ? 12 : 6, cell_size = ctx->simplex ? 3 : 4, j;
          const PetscInt numVerts = 11;
          PetscInt cellsT[][4] = {
            {0,  1, 6, 5 },
            {1,  2, 7, 6 },
            {2,  3, 8, 7 },
            {3,  4, 9, 8 },
            {5,  6, 7, 10},
            {10, 7, 8, 9 }
          };
          PetscInt cellsS[][3] = {
            {0,  1, 6 },
            {1,  2, 6 },
            {6,  2, 7 },
            {7,  2, 8 },
            {8,  2, 3 },
            {8,  3, 4 },
            {0,  6, 5 },
            {5,  6, 7 },
            {5,  7, 10},
            {10, 7, 9 },
            {9,  7, 8 },
            {9,  8, 4 }
          };
          const PetscInt *pcell = (const PetscInt *)(ctx->simplex ? &cellsS[0][0] : &cellsT[0][0]);
          PetscReal coords[11][2], *flatCoords = &coords[0][0];
          PetscReal rad = ctx->radius[grid];
          for (j = 0; j < 5; j++) { // outside edge
            PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4;
            r = rad * PetscCosReal(theta);
            coords[j][0] = r;
            z = rad * PetscSinReal(theta);
            coords[j][1] = z;
          }
          coords[j][0] = 0;
          coords[j++][1] = -rad * ctx->sphere_inner_radius_90degree[grid];
          coords[j][0] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
          coords[j++][1] = -rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
          coords[j][0] = rad * ctx->sphere_inner_radius_90degree[grid];
          coords[j++][1] = 0;
          coords[j][0] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
          coords[j++][1] = rad * ctx->sphere_inner_radius_45degree[grid] * 0.707106781186548;
          coords[j][0] = 0;
          coords[j++][1] = rad * ctx->sphere_inner_radius_90degree[grid];
          coords[j][0] = 0;
          coords[j++][1] = 0;
          PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, cell_size, ctx->interpolate, pcell, 2, flatCoords, &ctx->plex[grid]));
          PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle"));
          PetscCall(PetscInfo(ctx->plex[grid], "\t%" PetscInt_FMT ") Make circle %s mesh\n", grid, ctx->simplex ? "simplex" : "tensor"));
        }
      } else {
        PetscCheck(dim == 3 && ctx->sphere && !ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "not: dim == 3 && ctx->sphere && !ctx->simplex");
        PetscReal rad = ctx->radius[grid] / 1.732050807568877, inner_rad = rad * ctx->sphere_inner_radius_45degree[grid], outer_rad = rad;
        const PetscInt numCells = 7, cell_size = 8, numVerts = 16;
        const PetscInt cells[][8] = {
          {0, 3, 2, 1, 4,  5,  6,  7 },
          {0, 4, 5, 1, 8,  9,  13, 12},
          {1, 5, 6, 2, 9,  10, 14, 13},
          {2, 6, 7, 3, 10, 11, 15, 14},
          {0, 3, 7, 4, 8,  12, 15, 11},
          {0, 1, 2, 3, 8,  11, 10, 9 },
          {4, 7, 6, 5, 12, 13, 14, 15}
        };
        PetscReal coords[16 /* numVerts */][3];
        for (PetscInt j = 0; j < 4; j++) { // inner edge, low
          coords[j][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1);
          coords[j][1] = inner_rad * (j / 2 < 1 ? 1 : -1);
          coords[j][2] = inner_rad * -1;
        }
        for (PetscInt j = 0, jj = 4; j < 4; j++, jj++) { // inner edge, hi
          coords[jj][0] = inner_rad * (j == 0 || j == 3 ? 1 : -1);
          coords[jj][1] = inner_rad * (j / 2 < 1 ? 1 : -1);
          coords[jj][2] = inner_rad * 1;
        }
        for (PetscInt j = 0, jj = 8; j < 4; j++, jj++) { // outer edge, low
          coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1);
          coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1);
          coords[jj][2] = outer_rad * -1;
        }
        for (PetscInt j = 0, jj = 12; j < 4; j++, jj++) { // outer edge, hi
          coords[jj][0] = outer_rad * (j == 0 || j == 3 ? 1 : -1);
          coords[jj][1] = outer_rad * (j / 2 < 1 ? 1 : -1);
          coords[jj][2] = outer_rad * 1;
        }
        PetscCall(DMPlexCreateFromCellListPetsc(comm_self, 3, numCells, numVerts, cell_size, ctx->interpolate, (const PetscInt *)cells, 3, (const PetscReal *)coords, &ctx->plex[grid]));
        PetscCall(PetscObjectSetName((PetscObject)ctx->plex[grid], "cubed sphere"));
"simplex" : "tensor")); 745 } 746 PetscCall(DMSetFromOptions(ctx->plex[grid])); 747 } // grid loop 748 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack, prefix)); 749 { /* convert to p4est (or whatever), wait for discretization to create pack */ 750 char convType[256]; 751 PetscBool flg; 752 753 PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX"); 754 PetscCall(PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg)); 755 PetscOptionsEnd(); 756 if (flg) { 757 ctx->use_p4est = PETSC_TRUE; /* flag for Forest */ 758 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 759 DM dmforest; 760 PetscBool isForest; 761 762 PetscCall(DMConvert(ctx->plex[grid], convType, &dmforest)); 763 PetscCheck(dmforest, ctx->comm, PETSC_ERR_PLIB, "Convert failed?"); 764 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest, prefix)); 765 PetscCall(DMIsForest(dmforest, &isForest)); 766 PetscCheck(isForest, ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?"); 767 if (ctx->sphere) PetscCall(DMForestSetBaseCoordinateMapping(dmforest, GeometryDMLandau, ctx)); 768 PetscCall(DMDestroy(&ctx->plex[grid])); 769 ctx->plex[grid] = dmforest; // Forest for adaptivity 770 } 771 } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */ 772 } 773 } /* non-file */ 774 PetscCall(DMSetDimension(pack, dim)); 775 PetscCall(PetscObjectSetName((PetscObject)pack, "Mesh")); 776 PetscCall(DMSetApplicationContext(pack, ctx)); 777 PetscFunctionReturn(PETSC_SUCCESS); 778 } 779 780 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx) 781 { 782 PetscInt ii, i0; 783 char buf[256]; 784 PetscSection section; 785 786 PetscFunctionBegin; 787 for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 788 if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e")); 789 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii)); 790 /* Setup Discretization - FEM */ 791 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, NULL, PETSC_DECIDE, &ctx->fe[ii])); 792 PetscCall(PetscObjectSetName((PetscObject)ctx->fe[ii], buf)); 793 PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii])); 794 } 795 PetscCall(DMCreateDS(ctx->plex[grid])); 796 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion)); 797 for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 798 if (ii == 0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se")); 799 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii)); 800 PetscCall(PetscSectionSetComponentName(section, i0, 0, buf)); 801 } 802 PetscFunctionReturn(PETSC_SUCCESS); 803 } 804 805 /* Define a Maxwellian function for testing out the operator. */ 806 807 /* Using cartesian velocity space coordinates, the particle */ 808 /* density, [1/m^3], is defined according to */ 809 810 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */ 811 812 /* Using some constant, c, we normalize the velocity vector into a */ 813 /* dimensionless variable according to v=c*x. 
   Thus the density, $n$, becomes

   $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$

   Defining $\theta=2T/mc^2$, we thus find that the probability density
   for finding the particle within the interval in a box dx^3 around x is

   $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$
*/

typedef struct {
  PetscReal v_0;
  PetscReal kT_m;
  PetscReal n;
  PetscReal shift;
} MaxwellianCtx;

static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
  PetscInt i;
  PetscReal v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0), shift; /* theta = 2kT/mc^2 */

  PetscFunctionBegin;
  /* compute the exponents, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
  /* evaluate the Maxwellian */
  if (mctx->shift < 0) shift = -mctx->shift;
  else {
    u[0] = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
    shift = mctx->shift;
  }
  if (shift != 0.) {
    v2 = 0;
    for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
    v2 += (x[dim - 1] - shift) * (x[dim - 1] - shift);
    /* evaluate the shifted Maxwellian */
    u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state

  Collective

  Input Parameters:
+ dm      - The mesh (local)
. time    - Current time
. temps   - Temperatures of each species (global)
. ns      - Number density of each species (global)
. grid    - index into current grid - just used for offset into `temps` and `ns`
. b_id    - batch index
. n_batch - number of batches
- actx    - Landau context

  Output Parameter:
. X - The state (local to this grid)

  Level: beginner

.seealso: `DMPlexLandauCreateVelocitySpace()`
@*/
PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
{
  LandauCtx *ctx = (LandauCtx *)actx;
  PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
  PetscInt dim;
  MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

  PetscFunctionBegin;
  PetscCall(DMGetDimension(dm, &dim));
  if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
  for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
    mctxs[i0] = &data[i0];
    data[i0].v_0 = ctx->v_0;                              // v_0 same for all grids
    data[i0].kT_m = ctx->k * temps[ii] / ctx->masses[ii]; /* kT/m */
    data[i0].n = ns[ii];
    initu[i0] = maxwellian;
    data[i0].shift = 0;
  }
  data[0].shift = ctx->electronShift;
  /* need to make ADD_ALL_VALUES work - TODO */
  PetscCall(DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X));
  PetscFunctionReturn(PETSC_SUCCESS);
}
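/*
  Example (hypothetical sketch, not taken from this file): projecting Maxwellians onto the
  sub-vector of one grid of the composite state, mirroring what LandauSetInitialCondition()
  below does; `globXArray`, `b_id`, and `grid` are assumed to come from a
  DMCompositeGetAccessArray() loop like the one in LandauFormJacobian_Internal().

    Vec Xsub = globXArray[LAND_PACK_IDX(b_id, grid)];
    PetscCall(VecZeroEntries(Xsub));
    PetscCall(DMPlexLandauAddMaxwellians(ctx->plex[grid], Xsub, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx->batch_sz, ctx));
*/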
/*
  LandauSetInitialCondition - Adds Maxwellians with context

  Collective

  Input Parameters:
+ dm      - The mesh
. grid    - index into current grid - just used for offset into temp and ns
. b_id    - batch index
. n_batch - number of batches
- actx    - Landau context with T and n

  Output Parameter:
. X - The state

  Level: beginner

.seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
*/
static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
{
  LandauCtx *ctx = (LandauCtx *)actx;

  PetscFunctionBegin;
  if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx));
  PetscCall(VecZeroEntries(X));
  PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx));
  PetscFunctionReturn(PETSC_SUCCESS);
}

// adapt a level once. Forest in/out
#if defined(PETSC_USE_INFO)
static const char *s_refine_names[] = {"RE", "Z1", "Origin", "Z2", "Uniform"};
#endif
static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
{
  DM forest, plex, adaptedDM = NULL;
  PetscDS prob;
  PetscBool isForest;
  PetscQuadrature quad;
  PetscInt Nq, Nb, *Nb2, cStart, cEnd, c, dim, qj, k;
  DMLabel adaptLabel = NULL;

  PetscFunctionBegin;
  forest = ctx->plex[grid];
  PetscCall(DMCreateDS(forest));
  PetscCall(DMGetDS(forest, &prob));
  PetscCall(DMGetDimension(forest, &dim));
  PetscCall(DMIsForest(forest, &isForest));
  PetscCheck(isForest, ctx->comm, PETSC_ERR_ARG_WRONG, "! Forest");
  PetscCall(DMConvert(forest, DMPLEX, &plex));
  PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel));
  PetscCall(PetscFEGetQuadrature(fem, &quad));
  PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL));
  PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND);
  PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb));
  PetscCall(PetscDSGetDimensions(prob, &Nb2));
  PetscCheck(Nb2[0] == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, " Nb = %" PetscInt_FMT " != Nb (%" PetscInt_FMT ")", Nb, Nb2[0]);
  PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND);
  PetscCall(PetscInfo(sol, "%" PetscInt_FMT ") Refine phase: %s\n", grid, s_refine_names[type]));
  if (type == 4) {
    for (c = cStart; c < cEnd; c++) PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
  } else if (type == 2) {
    PetscInt rCellIdx[8], nr = 0, nrmax = (dim == 3) ? 8 : 2;
    PetscReal minRad = PETSC_INFINITY, r;
    for (c = cStart; c < cEnd; c++) {
      PetscReal tt, v0[LANDAU_MAX_NQND * 3], J[LANDAU_MAX_NQND * 9], invJ[LANDAU_MAX_NQND * 9], detJ[LANDAU_MAX_NQND];
      PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, J, invJ, detJ));
      (void)J;
      (void)invJ;
      for (qj = 0; qj < Nq; ++qj) {
        tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr((dim == 3) ? v0[dim * qj + 2] : 0);
        r = PetscSqrtReal(tt);
        if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
          minRad = r;
          nr = 0;
          rCellIdx[nr++] = c;
          PetscCall(PetscInfo(sol, "\t\t%" PetscInt_FMT ") Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", grid, (double)r, c, qj + 1, Nq));
        } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
          for (k = 0; k < nr; k++)
            if (c == rCellIdx[k]) break;
          if (k == nr) {
            rCellIdx[nr++] = c;
            PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", grid, (double)r, c, qj + 1, Nq, (double)(r - minRad)));
          }
        }
      }
    }
    for (k = 0; k < nr; k++) PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE));
    PetscCall(PetscInfo(sol, "\t\t\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", grid, nr, rCellIdx[0], rCellIdx[1], (double)minRad));
  } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
    PetscScalar *coef = NULL;
    Vec coords;
    PetscInt csize, Nv, d, nz, nrefined = 0;
    DM cdm;
    PetscSection cs;
    PetscCall(DMGetCoordinatesLocal(forest, &coords));
    PetscCall(DMGetCoordinateDM(forest, &cdm));
    PetscCall(DMGetLocalSection(cdm, &cs));
    for (c = cStart; c < cEnd; c++) {
      PetscInt doit = 0, outside = 0;
      PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef));
      Nv = csize / dim;
      for (nz = d = 0; d < Nv; d++) {
        PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
        x = PetscSqrtReal(x);
        if (type == 0) {
          if (ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
        } else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) {
          outside++; /* don't refine outside electron refine radius */
          PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
        } else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) {
          outside++; /* refine r=0 cells on refinement front */
          PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") (debug) found %s cells\n", grid, s_refine_names[type]));
        }
        if (x < PETSC_MACHINE_EPSILON * 10. && (type != 0 || ctx->re_radius > PETSC_SQRT_MACHINE_EPSILON)) nz++;
      }
      PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef));
      if (doit || (outside < Nv && nz)) {
        PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE));
        nrefined++;
      }
    }
    PetscCall(PetscInfo(sol, "\t%" PetscInt_FMT ") Refined %" PetscInt_FMT " cells\n", grid, nrefined));
  }
  PetscCall(DMDestroy(&plex));
  PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM));
  PetscCall(DMLabelDestroy(&adaptLabel));
  *newForest = adaptedDM;
  if (adaptedDM) {
    if (isForest) PetscCall(DMForestSetAdaptivityForest(adaptedDM, NULL)); // ????
    PetscCall(DMConvert(adaptedDM, DMPLEX, &plex));
    PetscCall(DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd));
    PetscCall(PetscInfo(sol, "\t\t\t\t%" PetscInt_FMT ") %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", grid, cEnd - cStart, Nq * (cEnd - cStart)));
    PetscCall(DMDestroy(&plex));
  } else *newForest = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

// forest goes in (ctx->plex[grid]), plex comes out
static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
{
  PetscInt adaptIter;

  PetscFunctionBegin;
  PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
  for (type = 0; type < 5; type++) {
    for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
      DM newForest = NULL;
      PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest));
      if (newForest) {
        PetscCall(DMDestroy(&ctx->plex[grid]));
        PetscCall(VecDestroy(uu));
        PetscCall(DMCreateGlobalVector(newForest, uu));
        PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx));
        ctx->plex[grid] = newForest;
      } else {
        PetscCall(PetscInfo(*uu, "No refinement\n"));
      }
    }
  }
  PetscCall(PetscObjectSetName((PetscObject)*uu, "uAMR"));
  PetscFunctionReturn(PETSC_SUCCESS);
}

// make log(Lambdas) from NRL Plasma formulary
static PetscErrorCode makeLambdas(LandauCtx *ctx)
{
  PetscFunctionBegin;
  for (PetscInt gridi = 0; gridi < ctx->num_grids; gridi++) {
    PetscInt iii = ctx->species_offset[gridi];
    PetscReal Ti_ev = (ctx->thermal_temps[iii] / 1.1604525e7) * 1000; // convert (back) to eV
    PetscReal ni = ctx->n[iii] * ctx->n_0;
    for (PetscInt gridj = gridi; gridj < ctx->num_grids; gridj++) {
      PetscInt jjj = ctx->species_offset[gridj];
      PetscReal Zj = ctx->charges[jjj] / 1.6022e-19;
      if (gridi == 0) {
        if (gridj == 0) { // lam_ee
          ctx->lambdas[gridi][gridj] = 23.5 - PetscLogReal(PetscSqrtReal(ni) * PetscPowReal(Ti_ev, -1.25)) - PetscSqrtReal(1e-5 + PetscSqr(PetscLogReal(Ti_ev) - 2) / 16);
        } else { // lam_ei == lam_ie
          if (10 * Zj * Zj > Ti_ev) {
            ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(PetscSqrtReal(ni) * Zj * PetscPowReal(Ti_ev, -1.5));
          } else {
            ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 24 - PetscLogReal(PetscSqrtReal(ni) / Ti_ev);
          }
        }
      } else { // lam_ii'
        PetscReal mui = ctx->masses[iii] / 1.6720e-27, Zi = ctx->charges[iii] / 1.6022e-19;
        PetscReal Tj_ev = (ctx->thermal_temps[jjj] / 1.1604525e7) * 1000; // convert (back) to eV
        PetscReal muj = ctx->masses[jjj] / 1.6720e-27;
        PetscReal nj = ctx->n[jjj] * ctx->n_0;
        ctx->lambdas[gridi][gridj] = ctx->lambdas[gridj][gridi] = 23 - PetscLogReal(Zi * Zj * (mui + muj) / (mui * Tj_ev + muj * Ti_ev) * PetscSqrtReal(ni * Zi * Zi / Ti_ev + nj * Zj * Zj / Tj_ev));
      }
    }
  }
  //PetscReal v0 = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */
  PetscFunctionReturn(PETSC_SUCCESS);
}
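/*
  For reference (assuming the usual NRL Plasma Formulary conventions for these expressions,
  with T in eV), the branches in makeLambdas() above mirror

    lambda_ee  = 23.5 - ln( sqrt(n_e) T_e^{-5/4} ) - sqrt( 1e-5 + (ln T_e - 2)^2 / 16 )
    lambda_ei  = 23   - ln( sqrt(n_e) Z T_e^{-3/2} )   (T_e < 10 Z^2 branch)
               = 24   - ln( sqrt(n_e) / T_e )          (otherwise)
    lambda_ii' = 23   - ln( Z Z' (mu + mu') / (mu T'_ev + mu' T_ev) * sqrt( n Z^2/T_ev + n' Z'^2/T'_ev ) )

  This is only an informal cross-reference of the formulas, not a statement about the
  units fed in above.
*/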
static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
{
  PetscBool flg, fileflg;
  PetscInt ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS], non_dim_grid;
  PetscReal lnLam = 10;
  DM dummy;

  PetscFunctionBegin;
  PetscCall(DMCreate(ctx->comm, &dummy));
  /* get options - initialize context */
  ctx->verbose = 1; // should be 0 for silent compliance
  ctx->batch_sz = 1;
  ctx->batch_view_idx = 0;
  ctx->interpolate = PETSC_TRUE;
  ctx->gpu_assembly = PETSC_TRUE;
  ctx->norm_state = 0;
  ctx->electronShift = 0;
  ctx->M = NULL;
  ctx->J = NULL;
  /* geometry and grids */
  ctx->sphere = PETSC_FALSE;
  ctx->use_p4est = PETSC_FALSE;
  ctx->simplex = PETSC_FALSE;
  for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
    ctx->radius[grid] = 5.;      /* thermal radius (velocity) */
    ctx->radius_perp[grid] = 5.; /* thermal radius (velocity) */
    ctx->radius_par[grid] = 5.;  /* thermal radius (velocity) */
    ctx->numAMRRefine[grid] = 0;
    ctx->postAMRRefine[grid] = 0;
    ctx->species_offset[grid + 1] = 1; // one species default
    num_species_grid[grid] = 0;
    ctx->plex[grid] = NULL; /* cache as expensive to Convert */
  }
  ctx->species_offset[0] = 0;
  ctx->re_radius = 0.;
  ctx->vperp0_radius1 = 0;
  ctx->vperp0_radius2 = 0;
  ctx->nZRefine1 = 0;
  ctx->nZRefine2 = 0;
  ctx->numRERefine = 0;
  num_species_grid[0] = 1; // one species default
  /* species - [0] electrons, [1] one ion species eg, deuterium, [2] heavy impurity ion, ... */
  ctx->charges[0] = -1;                      /* electron charge (MKS) */
  ctx->masses[0] = 1 / 1835.469965278441013; /* temporary value in proton mass */
  ctx->n[0] = 1;
  ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */
  ctx->thermal_temps[0] = 1;
  /* constants, etc. */
  ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
  ctx->k = 1.38064852e-23;    /* Boltzmann constant (MKS) J/K */
  ctx->n_0 = 1.e20;           /* typical plasma n, but could set it to 1 */
  ctx->Ez = 0;
  for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
  for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2;
  if (LANDAU_DIM == 2) ctx->cells0[0] = 1;
  ctx->use_matrix_mass = PETSC_FALSE;
  ctx->use_relativistic_corrections = PETSC_FALSE;
  ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
  ctx->SData_d.w = NULL;
  ctx->SData_d.x = NULL;
  ctx->SData_d.y = NULL;
  ctx->SData_d.z = NULL;
  ctx->SData_d.invJ = NULL;
  ctx->jacobian_field_major_order = PETSC_FALSE;
  ctx->SData_d.coo_elem_offsets = NULL;
  ctx->SData_d.coo_elem_point_offsets = NULL;
  ctx->SData_d.coo_elem_fullNb = NULL;
  ctx->SData_d.coo_size = 0;
  PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
#if defined(PETSC_HAVE_KOKKOS)
  ctx->deviceType = LANDAU_KOKKOS;
  PetscCall(PetscStrncpy(ctx->filename, "kokkos", sizeof(ctx->filename)));
#else
  ctx->deviceType = LANDAU_CPU;
  PetscCall(PetscStrncpy(ctx->filename, "cpu", sizeof(ctx->filename)));
#endif
  PetscCall(PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu' 'kokkos'", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), NULL));
  PetscCall(PetscStrcmp("cpu", ctx->filename, &flg));
  if (flg) {
    ctx->deviceType = LANDAU_CPU;
  } else {
    PetscCall(PetscStrcmp("kokkos", ctx->filename, &flg));
    PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", ctx->filename);
    ctx->deviceType = LANDAU_KOKKOS;
  }
  ctx->filename[0] = '\0';
  PetscCall(PetscOptionsString("-dm_landau_filename", "file to read mesh from", "plexland.c", ctx->filename, ctx->filename, sizeof(ctx->filename), &fileflg));
  PetscCall(PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL));
  PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL));
  PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL));
  PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "LANDAU_MAX_BATCH_SZ %d < ctx->batch_sz %" PetscInt_FMT, LANDAU_MAX_BATCH_SZ, ctx->batch_sz);
  PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL));
  PetscCheck(ctx->batch_view_idx < ctx->batch_sz, ctx->comm, PETSC_ERR_ARG_WRONG, "-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT, ctx->batch_view_idx, ctx->batch_sz);
  PetscCall(PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in units of Connor-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL));
  PetscCall(PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL));
  PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL));
  PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL));
  PetscCall(PetscOptionsBool("-dm_landau_simplex", "Use simplex elements", "plexland.c", ctx->simplex, &ctx->simplex, NULL));
  PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, NULL));
  if (LANDAU_DIM == 2 && ctx->use_relativistic_corrections) ctx->use_relativistic_corrections = PETSC_FALSE; // should warn
  PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
                             &ctx->use_energy_tensor_trick, NULL));

  /* get num species with temperature, set defaults */
  for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
    ctx->thermal_temps[ii] = 1;
    ctx->charges[ii] = 1;
    ctx->masses[ii] = 1;
    ctx->n[ii] = 1;
  }
  nt = LANDAU_MAX_SPECIES;
  PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg));
must be provided to set the number of species"); 1213 PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt)); 1214 ctx->num_species = nt; 1215 for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */ 1216 nm = LANDAU_MAX_SPECIES - 1; 1217 PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg)); 1218 PetscCheck(!flg || nm == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species - 1); 1219 nm = LANDAU_MAX_SPECIES; 1220 PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg)); 1221 PetscCheck(!flg || nm == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT, nm, ctx->num_species); 1222 for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */ 1223 ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */ 1224 nc = LANDAU_MAX_SPECIES - 1; 1225 PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg)); 1226 if (flg) PetscCheck(nc == ctx->num_species - 1, ctx->comm, PETSC_ERR_ARG_WRONG, "num charges %" PetscInt_FMT " != num species %" PetscInt_FMT, nc, ctx->num_species - 1); 1227 for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */ 1228 /* geometry and grids */ 1229 nt = LANDAU_MAX_GRIDS; 1230 PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg)); 1231 if (flg) { 1232 ctx->num_grids = nt; 1233 for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii]; 1234 PetscCheck(ctx->num_species == nt, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". 
%" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)", nt, ctx->num_species, 1235 ctx->num_grids, LANDAU_MAX_GRIDS); 1236 } else { 1237 if (ctx->num_species > LANDAU_MAX_GRIDS) { 1238 num_species_grid[0] = 1; 1239 num_species_grid[1] = ctx->num_species - 1; 1240 ctx->num_grids = 2; 1241 } else { 1242 ctx->num_grids = ctx->num_species; 1243 for (ii = 0; ii < ctx->num_grids; ii++) num_species_grid[ii] = 1; 1244 } 1245 } 1246 for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii]; 1247 PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????", ctx->species_offset[ctx->num_grids], 1248 ctx->num_species); 1249 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1250 PetscInt iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid 1251 ctx->thermal_speed[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: plasma formulary def */ 1252 } 1253 // get lambdas here because we need them for t_0 etc 1254 PetscCall(PetscOptionsReal("-dm_landau_ln_lambda", "Universal cross section parameter. Default uses NRL formulas", "plexland.c", lnLam, &lnLam, &flg)); 1255 if (flg) { 1256 for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) { 1257 for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) ctx->lambdas[gridj][grid] = lnLam; /* cross section ratio large - small angle collisions */ 1258 } 1259 } else { 1260 PetscCall(makeLambdas(ctx)); 1261 } 1262 non_dim_grid = 0; 1263 PetscCall(PetscOptionsInt("-dm_landau_normalization_grid", "Index of grid to use for setting v_0, m_0, t_0. 
(Not recommended)", "plexland.c", non_dim_grid, &non_dim_grid, &flg)); 1264 if (non_dim_grid != 0) PetscCall(PetscInfo(dummy, "Normalization grid set to %" PetscInt_FMT ", but non-default not well verified\n", non_dim_grid)); 1265 PetscCheck(non_dim_grid >= 0 && non_dim_grid < ctx->num_species, ctx->comm, PETSC_ERR_ARG_WRONG, "Normalization grid wrong: %" PetscInt_FMT, non_dim_grid); 1266 ctx->v_0 = ctx->thermal_speed[non_dim_grid]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */ 1267 ctx->m_0 = ctx->masses[non_dim_grid]; /* arbitrary reference mass, electrons */ 1268 ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[non_dim_grid])) / ctx->lambdas[non_dim_grid][non_dim_grid] / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[non_dim_grid,non_dim_grid]=1 */ 1269 /* domain */ 1270 nt = LANDAU_MAX_GRIDS; 1271 PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg)); 1272 if (flg) { 1273 PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1274 while (nt--) ctx->radius_par[nt] = ctx->radius_perp[nt] = ctx->radius[nt]; 1275 } else { 1276 nt = LANDAU_MAX_GRIDS; 1277 PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_par", "Parallel velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_par, &nt, &flg)); 1278 if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_par: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1279 PetscCall(PetscOptionsRealArray("-dm_landau_domain_max_perp", "Perpendicular velocity domain size in units of thermal velocity of grid", "plexland.c", ctx->radius_perp, &nt, &flg)); 1280 if (flg) PetscCheck(nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_domain_max_perp: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1281 } 1282 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1283 if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c - need to set par and perp with this -- todo */ 1284 if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75; 1285 else ctx->radius[grid] = -ctx->radius[grid]; 1286 ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid) 1287 PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid)); 1288 } 1289 ctx->radius[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1290 ctx->radius_perp[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1291 ctx->radius_par[grid] *= ctx->thermal_speed[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0 1292 } 1293 /* amr parameters */ 1294 if (!fileflg) { 1295 nt = LANDAU_MAX_GRIDS; 1296 PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg)); 1297 PetscCheck(!flg || nt >= ctx->num_grids, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT, nt, ctx->num_grids); 1298 nt = LANDAU_MAX_GRIDS; 1299 
PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg)); 1300 for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now 1301 PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg)); 1302 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_pre", "Number of levels to refine along v_perp=0 before origin refine", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg)); 1303 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine_post", "Number of levels to refine along v_perp=0 after origin refine", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg)); 1304 PetscCall(PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg)); 1305 PetscCall(PetscOptionsReal("-dm_landau_z_radius_pre", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg)); 1306 PetscCall(PetscOptionsReal("-dm_landau_z_radius_post", "velocity range to refine r=0 axis (for electrons) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg)); 1307 /* spherical domain */ 1308 if (ctx->sphere || ctx->simplex) { 1309 ctx->sphere_uniform_normal = PETSC_FALSE; 1310 PetscCall(PetscOptionsBool("-dm_landau_sphere_uniform_normal", "Scaling of circle radius to get uniform particles per cell with Maxwellians (not used)", "plexland.c", ctx->sphere_uniform_normal, &ctx->sphere_uniform_normal, NULL)); 1311 if (!ctx->sphere_uniform_normal) { // true 1312 nt = LANDAU_MAX_GRIDS; 1313 PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_90degree_scale", "Scaling of radius for inner circle on 90 degree grid", "plexland.c", ctx->sphere_inner_radius_90degree, &nt, &flg)); 1314 if (flg && nt < ctx->num_grids) { 1315 for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = ctx->sphere_inner_radius_90degree[0]; 1316 } else if (!flg || nt == 0) { 1317 if (LANDAU_DIM == 2) { 1318 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.4; // optimized for R=5, Q4, AMR=0 1319 } else { 1320 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_90degree[grid] = 0.577 * 0.40; 1321 } 1322 } 1323 nt = LANDAU_MAX_GRIDS; 1324 PetscCall(PetscOptionsRealArray("-dm_landau_sphere_inner_radius_45degree_scale", "Scaling of radius for inner circle on 45 degree grid", "plexland.c", ctx->sphere_inner_radius_45degree, &nt, &flg)); 1325 if (flg && nt < ctx->num_grids) { 1326 for (PetscInt grid = nt; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = ctx->sphere_inner_radius_45degree[0]; 1327 } else if (!flg || nt == 0) { 1328 if (LANDAU_DIM == 2) { 1329 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.45; // optimized for R=5, Q4, AMR=0 1330 } else { 1331 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ctx->sphere_inner_radius_45degree[grid] = 0.4; // 3D sphere 1332 } 1333 } 1334 if (ctx->sphere) PetscCall(PetscInfo(ctx->plex[0], "sphere : , 45 degree scaling = %g; 90 degree scaling = %g\n", (double)ctx->sphere_inner_radius_45degree[0], (double)ctx->sphere_inner_radius_90degree[0])); 1335 } else { 1336 for (PetscInt grid = 0; grid < 
ctx->num_grids; grid++) { 1337 switch (ctx->numAMRRefine[grid]) { 1338 case 0: 1339 case 1: 1340 case 2: 1341 case 3: 1342 default: 1343 if (LANDAU_DIM == 2) { 1344 ctx->sphere_inner_radius_90degree[grid] = 0.40; 1345 ctx->sphere_inner_radius_45degree[grid] = 0.45; 1346 } else { 1347 ctx->sphere_inner_radius_45degree[grid] = 0.25; 1348 } 1349 } 1350 } 1351 } 1352 } else { 1353 nt = LANDAU_DIM; 1354 PetscCall(PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg)); 1355 } 1356 } 1357 /* processing options */ 1358 PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL)); 1359 PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL)); 1360 if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly"); 1361 PetscCheck(!ctx->jacobian_field_major_order, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED"); 1362 PetscOptionsEnd(); 1363 1364 for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0; 1365 if (ctx->verbose != 0) { 1366 PetscReal pmassunit = PetscRealConstant(1.6720e-27); 1367 1368 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / pmassunit), (double)(ctx->num_species > 2 ? ctx->masses[2] / pmassunit : 0))); 1369 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0))); 1370 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "n: e: %10.3e i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0))); 1371 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "thermal T (K): e=%10.3e i=%10.3e %10.3e. Normalization grid %" PetscInt_FMT ": v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e %" PetscInt_FMT " batched, view batch %" PetscInt_FMT "\n", (double)ctx->thermal_temps[0], 1372 (double)ctx->thermal_temps[1], (double)((ctx->num_species > 2) ? 
ctx->thermal_temps[2] : 0), non_dim_grid, (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->batch_sz, ctx->batch_view_idx)); 1373 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Domain radius (AMR levels) grid %d: par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius_par[0], (double)ctx->radius_perp[0], ctx->numAMRRefine[0])); 1374 for (ii = 1; ii < ctx->num_grids; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, ", %" PetscInt_FMT ": par=%10.3e perp=%10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius_par[ii], (double)ctx->radius_perp[ii], ctx->numAMRRefine[ii])); 1375 if (ctx->use_relativistic_corrections) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nUse relativistic corrections\n")); 1376 else PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1377 } 1378 PetscCall(DMDestroy(&dummy)); 1379 { 1380 PetscMPIInt rank; 1381 PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank)); 1382 ctx->stage = 0; 1383 PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */ 1384 PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */ 1385 PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */ 1386 PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */ 1387 PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */ 1388 PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */ 1389 PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */ 1390 PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */ 1391 PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */ 1392 PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */ 1393 PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */ 1394 PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */ 1395 PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */ 1396 PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */ 1397 PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */ 1398 PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */ 1399 PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */ 1400 1401 if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? 
*/ 1402 PetscCall(PetscOptionsClearValue(NULL, "-snes_converged_reason")); 1403 PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason")); 1404 PetscCall(PetscOptionsClearValue(NULL, "-snes_monitor")); 1405 PetscCall(PetscOptionsClearValue(NULL, "-ksp_monitor")); 1406 PetscCall(PetscOptionsClearValue(NULL, "-ts_monitor")); 1407 PetscCall(PetscOptionsClearValue(NULL, "-ts_view")); 1408 PetscCall(PetscOptionsClearValue(NULL, "-ts_adapt_monitor")); 1409 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view")); 1410 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view")); 1411 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view")); 1412 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mass_view")); 1413 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view")); 1414 PetscCall(PetscOptionsClearValue(NULL, "-dm_landau_mat_view")); 1415 PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason")); 1416 PetscCall(PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor")); 1417 PetscCall(PetscOptionsClearValue(NULL, "-")); 1418 PetscCall(PetscOptionsClearValue(NULL, "-info")); 1419 } 1420 } 1421 PetscFunctionReturn(PETSC_SUCCESS); 1422 } 1423 1424 static PetscErrorCode CreateStaticData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx) 1425 { 1426 PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS]; 1427 PetscQuadrature quad; 1428 const PetscReal *quadWeights; 1429 PetscReal invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 1430 PetscInt numCells[LANDAU_MAX_GRIDS], Nq, Nb, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES; 1431 PetscTabulation *Tf; 1432 PetscDS prob; 1433 1434 PetscFunctionBegin; 1435 PetscCall(PetscFEGetDimension(ctx->fe[0], &Nb)); 1436 PetscCheck(Nb <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. 
Nb = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nb, LANDAU_MAX_NQND); 1437 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1438 for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) { 1439 invMass[ii] = ctx->m_0 / ctx->masses[ii]; 1440 nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii]; 1441 nu_beta[ii] = PetscSqr(ctx->charges[ii] / ctx->epsilon0) / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3); 1442 } 1443 } 1444 if (ctx->verbose == 4) { 1445 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "nu_alpha: ")); 1446 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1447 PetscInt iii = ctx->species_offset[grid]; 1448 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_alpha[ii])); 1449 } 1450 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_beta: ")); 1451 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1452 PetscInt iii = ctx->species_offset[grid]; 1453 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %e", (double)nu_beta[ii])); 1454 } 1455 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nnu_alpha[i]*nu_beta[j]*lambda[i][j]:\n")); 1456 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1457 PetscInt iii = ctx->species_offset[grid]; 1458 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) { 1459 for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) { 1460 PetscInt jjj = ctx->species_offset[gridj]; 1461 for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)(nu_alpha[ii] * nu_beta[jj] * ctx->lambdas[grid][gridj]))); 1462 } 1463 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1464 } 1465 } 1466 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "lambda[i][j]:\n")); 1467 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1468 PetscInt iii = ctx->species_offset[grid]; 1469 for (PetscInt ii = iii; ii < ctx->species_offset[grid + 1]; ii++) { 1470 for (PetscInt gridj = 0; gridj < ctx->num_grids; gridj++) { 1471 PetscInt jjj = ctx->species_offset[gridj]; 1472 for (PetscInt jj = jjj; jj < ctx->species_offset[gridj + 1]; jj++) PetscCall(PetscPrintf(PETSC_COMM_WORLD, " %14.9e", (double)ctx->lambdas[grid][gridj])); 1473 } 1474 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 1475 } 1476 } 1477 } 1478 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 1479 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 1480 /* DS, Tab and quad is same on all grids */ 1481 PetscCheck(ctx->plex[0], ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 1482 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 1483 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights)); 1484 PetscCheck(Nq <= LANDAU_MAX_NQND, ctx->comm, PETSC_ERR_ARG_WRONG, "Order too high. 
Nq = %" PetscInt_FMT " > LANDAU_MAX_NQND (%d)", Nq, LANDAU_MAX_NQND); 1485 /* setup each grid */ 1486 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1487 PetscInt cStart, cEnd; 1488 PetscCheck(ctx->plex[grid] != NULL, ctx->comm, PETSC_ERR_ARG_WRONG, "Plex not created"); 1489 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1490 numCells[grid] = cEnd - cStart; // grids can have different topology 1491 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 1492 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 1493 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 1494 ncellsTot += numCells[grid]; 1495 } 1496 /* create GPU assembly data */ 1497 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1498 PetscContainer container; 1499 PetscScalar *elemMatrix, *elMat; 1500 pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE]; 1501 P4estVertexMaps *maps; 1502 const PetscInt *plex_batch = NULL, elMatSz = Nb * Nb * ctx->num_species * ctx->num_species; 1503 LandauIdx *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = NULL; 1504 /* create GPU assembly data */ 1505 PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1)); 1506 PetscCall(PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0)); 1507 PetscCall(PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps)); 1508 PetscCall(PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps)); 1509 PetscCall(PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix)); 1510 1511 { // setup COO assembly -- put COO metadata directly in ctx->SData_d 1512 PetscCall(PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets)); // array of integer pointers 1513 coo_elem_offsets[0] = 0; // finish later 1514 PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot)); 1515 ctx->SData_d.coo_n_cellsTot = ncellsTot; 1516 ctx->SData_d.coo_elem_offsets = (void *)coo_elem_offsets; 1517 ctx->SData_d.coo_elem_fullNb = (void *)coo_elem_fullNb; 1518 ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets; 1519 } 1520 1521 ctx->SData_d.coo_max_fullnb = 0; 1522 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1523 PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nb; 1524 if (grid_batch_is_inv[grid]) PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch)); 1525 PetscCheck(!plex_batch, ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_jacobian_field_major_order DEPRECATED"); 1526 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1527 // make maps 1528 maps[grid].d_self = NULL; 1529 maps[grid].num_elements = numCells[grid]; 1530 maps[grid].num_face = (PetscInt)(pow(Nq, 1. 
/ ((double)dim)) + .001); // Q 1531 maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^2 1532 maps[grid].num_reduced = 0; 1533 maps[grid].deviceType = ctx->deviceType; 1534 maps[grid].numgrids = ctx->num_grids; 1535 // count reduced and get 1536 PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx)); 1537 for (PetscInt ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) { 1538 if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add 1539 for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) { 1540 PetscInt fullNb = 0; 1541 for (PetscInt q = 0; q < Nb; ++q) { 1542 PetscInt numindices, *indices; 1543 PetscScalar *valuesOrig = elMat = elemMatrix; 1544 PetscCall(PetscArrayzero(elMat, totDim * totDim)); 1545 elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1; 1546 PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat)); 1547 if (ctx->simplex) { 1548 PetscCheck(numindices == Nb, ctx->comm, PETSC_ERR_ARG_WRONG, "numindices != Nb numindices=%" PetscInt_FMT " Nb=%" PetscInt_FMT, numindices, Nb); 1549 for (PetscInt q = 0; q < numindices; ++q) maps[grid].gIdx[eidx][fieldA][q] = indices[q]; 1550 fullNb++; 1551 } else { 1552 for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal (is this too complicated for simplices?) 1553 if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) { 1554 // found it 1555 if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0 1556 if (plex_batch) { 1557 maps[grid].gIdx[eidx][fieldA][q] = plex_batch[indices[f]]; 1558 } else { 1559 maps[grid].gIdx[eidx][fieldA][q] = indices[f]; 1560 } 1561 fullNb++; 1562 } else { //found a constraint 1563 PetscInt jj = 0; 1564 PetscReal sum = 0; 1565 const PetscInt ff = f; 1566 maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1 1567 PetscCheck(!ctx->simplex, ctx->comm, PETSC_ERR_ARG_WRONG, "No constraints with simplex"); 1568 do { // constraints are continuous in Plex - exploit that here 1569 PetscInt ii; // get 'scale' 1570 for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value 1571 if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. 
We could test that elMat[f*numindices + ff + ii] > 0, and break if not 1572 pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]); 1573 } 1574 } 1575 sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic 1576 // get 'gid' 1577 if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps 1578 else { 1579 if (plex_batch) { 1580 pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]]; 1581 } else { 1582 pointMaps[maps[grid].num_reduced][jj].gid = indices[f]; 1583 } 1584 fullNb++; 1585 } 1586 } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end 1587 while (jj < maps[grid].num_face) { 1588 pointMaps[maps[grid].num_reduced][jj].scale = 0; 1589 pointMaps[maps[grid].num_reduced][jj].gid = -1; 1590 jj++; 1591 } 1592 if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug 1593 PetscInt d, f; 1594 PetscReal tmp = 0; 1595 PetscCall( 1596 PetscPrintf(PETSC_COMM_SELF, "\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face)); 1597 for (d = 0, tmp = 0; d < numindices; ++d) { 1598 if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") %3" PetscInt_FMT ": ", d, indices[d])); 1599 for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]); 1600 if (tmp != 0) PetscCall(PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp)); 1601 } 1602 } 1603 maps[grid].num_reduced++; 1604 PetscCheck(maps[grid].num_reduced < MAP_BF_SIZE, PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %" PetscInt_FMT " > %" PetscInt_FMT, maps[grid].num_reduced, MAP_BF_SIZE); 1605 } 1606 break; 1607 } 1608 } 1609 } // !simplex 1610 // cleanup 1611 PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, &elMat)); 1612 if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat)); 1613 } 1614 { // setup COO assembly 1615 coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid 1616 if (fieldA == 0) { // cache full Nb for this element, on this grid per species 1617 coo_elem_fullNb[glb_elem_idx] = fullNb; 1618 if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb; 1619 } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT, coo_elem_fullNb[glb_elem_idx], fullNb); 1620 } 1621 } // field 1622 } // cell 1623 // allocate and copy point data maps[grid].gIdx[eidx][field][q] 1624 PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps)); 1625 for (PetscInt ej = 0; ej < maps[grid].num_reduced; ++ej) { 1626 for (PetscInt q = 0; q < maps[grid].num_face; ++q) { 1627 maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale; 1628 maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid; 1629 } 1630 } 1631 #if defined(PETSC_HAVE_KOKKOS) 1632 if (ctx->deviceType == LANDAU_KOKKOS) PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, grid)); // implies Kokkos does 1633 #endif 1634 if (plex_batch) { 1635 PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], 
&plex_batch)); 1636 PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this 1637 } 1638 } /* grids */ 1639 // finish COO 1640 { // setup COO assembly 1641 PetscInt *oor, *ooc; 1642 ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz; 1643 PetscCall(PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc)); 1644 for (PetscInt i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1; 1645 // get 1646 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1647 for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) { 1648 const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx]; 1649 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps, They should be the same but this is just for COO storage 1650 coo_elem_point_offsets[glb_elem_idx][0] = 0; 1651 for (PetscInt f = 0, cnt2 = 0; f < Nb; f++) { 1652 PetscInt idx = Idxs[f]; 1653 coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last 1654 if (idx >= 0) { 1655 cnt2++; 1656 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc 1657 } else { 1658 idx = -idx - 1; 1659 for (PetscInt q = 0; q < maps[grid].num_face; q++) { 1660 if (maps[grid].c_maps[idx][q].gid < 0) break; 1661 cnt2++; 1662 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc 1663 } 1664 } 1665 PetscCheck(cnt2 <= fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %" PetscInt_FMT " < %" PetscInt_FMT, fullNb, cnt2); 1666 } 1667 PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb] == fullNb, PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %" PetscInt_FMT " != fullNb=%" PetscInt_FMT, coo_elem_point_offsets[glb_elem_idx][Nb], fullNb); 1668 } 1669 } 1670 // set 1671 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 1672 for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) { 1673 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 1674 for (PetscInt ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) { 1675 const PetscInt fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb; 1676 // set (i,j) 1677 for (PetscInt fieldA = 0; fieldA < Nf[grid]; fieldA++) { 1678 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0]; 1679 PetscInt rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE]; 1680 for (PetscInt f = 0; f < Nb; ++f) { 1681 const PetscInt nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f]; 1682 if (nr == 1) rows[0] = Idxs[f]; 1683 else { 1684 const PetscInt idx = -Idxs[f] - 1; 1685 for (PetscInt q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid; 1686 } 1687 for (PetscInt g = 0; g < Nb; ++g) { 1688 const PetscInt nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g]; 1689 if (nc == 1) cols[0] = Idxs[g]; 1690 else { 1691 const PetscInt idx = -Idxs[g] - 1; 1692 for (PetscInt q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid; 1693 } 1694 const PetscInt idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 1695 for (PetscInt q = 0, idx = idx0; q < nr; q++) { 1696 for (PetscInt d = 0; d < nc; d++, idx++) { 1697 oor[idx] = rows[q] + moffset; 1698 ooc[idx] = cols[d] + moffset; 1699 } 1700 } 1701 } 1702 } 1703 } 1704 } // cell 1705 } // grid 1706 } // batch 1707 PetscCall(MatSetPreallocationCOO(ctx->J, 
ctx->SData_d.coo_size, oor, ooc)); 1708 PetscCall(PetscFree2(oor, ooc)); 1709 } 1710 PetscCall(PetscFree(pointMaps)); 1711 PetscCall(PetscFree(elemMatrix)); 1712 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 1713 PetscCall(PetscContainerSetPointer(container, (void *)maps)); 1714 PetscCall(PetscContainerSetCtxDestroy(container, LandauGPUMapsDestroy)); 1715 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container)); 1716 PetscCall(PetscContainerDestroy(&container)); 1717 PetscCall(PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0)); 1718 } // end GPU assembly 1719 { /* create static point data, Jacobian called first, only one vertex copy */ 1720 PetscReal *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a; 1721 PetscInt outer_ipidx, outer_ej, grid, nip_glb = 0; 1722 PetscFE fe; 1723 PetscCall(PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0)); 1724 PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n")); 1725 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid]; 1726 /* collect f data, first time is for Jacobian, but make mass now */ 1727 if (ctx->verbose != 0) { 1728 PetscInt ncells = 0, N; 1729 MatInfo info; 1730 PetscCall(MatGetInfo(ctx->J, MAT_LOCAL, &info)); 1731 PetscCall(MatGetSize(ctx->J, &N, NULL)); 1732 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid]; 1733 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT " nnz= %" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb, 1734 ctx->num_species, Nb, dim, N, (PetscInt)info.nz_used)); 1735 } 1736 PetscCall(PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a)); 1737 if (dim == 3) PetscCall(PetscMalloc1(nip_glb, &zz)); 1738 if (ctx->use_energy_tensor_trick) { 1739 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, ctx->simplex, NULL, PETSC_DECIDE, &fe)); 1740 PetscCall(PetscObjectSetName((PetscObject)fe, "energy")); 1741 } 1742 /* init each grids static data - no batch */ 1743 for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once) 1744 Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic 1745 PetscSection e_section; 1746 DM dmEnergy; 1747 PetscInt cStart, cEnd, ej; 1748 1749 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1750 // prep energy trick, get v^2 / 2 vector 1751 if (ctx->use_energy_tensor_trick) { 1752 PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? 
gamma_m1_f : energy_f}; 1753 Vec glob_v2; 1754 PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))}; 1755 1756 PetscCall(DMClone(ctx->plex[grid], &dmEnergy)); 1757 PetscCall(PetscObjectSetName((PetscObject)dmEnergy, "energy")); 1758 PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe)); 1759 PetscCall(DMCreateDS(dmEnergy)); 1760 PetscCall(DMGetLocalSection(dmEnergy, &e_section)); 1761 PetscCall(DMGetGlobalVector(dmEnergy, &glob_v2)); 1762 PetscCall(PetscObjectSetName((PetscObject)glob_v2, "trick")); 1763 c2_0[0] = &data[0]; 1764 PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2)); 1765 PetscCall(DMGetLocalVector(dmEnergy, &v2_2)); 1766 PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */ 1767 PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1768 PetscCall(DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1769 PetscCall(DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view")); 1770 PetscCall(VecViewFromOptions(glob_v2, NULL, "-energy_vec_view")); 1771 PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2)); 1772 } 1773 /* append part of the IP data for each grid */ 1774 for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) { 1775 PetscScalar *coefs = NULL; 1776 PetscReal vj[LANDAU_MAX_NQND * LANDAU_DIM], detJj[LANDAU_MAX_NQND], Jdummy[LANDAU_MAX_NQND * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0); 1777 invJe = invJ_a + outer_ej * Nq * dim * dim; 1778 PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj)); 1779 if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); 1780 /* create static point data */ 1781 for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) { 1782 const PetscInt gidx = outer_ipidx; 1783 const PetscReal *invJ = &invJe[qj * dim * dim]; 1784 ww[gidx] = detJj[qj] * quadWeights[qj]; 1785 if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */ 1786 // get xx, yy, zz 1787 if (ctx->use_energy_tensor_trick) { 1788 double refSpaceDer[3], eGradPhi[3]; 1789 const PetscReal *const DD = Tf[0]->T[1]; 1790 const PetscReal *Dq = &DD[qj * Nb * dim]; 1791 for (PetscInt d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0; 1792 for (PetscInt b = 0; b < Nb; ++b) { 1793 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]); 1794 } 1795 xx[gidx] = 1e10; 1796 if (ctx->use_relativistic_corrections) { 1797 double dg2_c2 = 0; 1798 //for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= c02; 1799 for (PetscInt d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]); 1800 dg2_c2 *= (double)c02; 1801 if (dg2_c2 >= .999) { 1802 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1803 yy[gidx] = vj[qj * dim + 1]; 1804 if (dim == 3) zz[gidx] = vj[qj * dim + 2]; 1805 PetscCall(PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx])); 1806 } else { 1807 PetscReal fact = c02 / PetscSqrtReal(1. 
- dg2_c2); 1808 for (PetscInt d = 0; d < dim; ++d) refSpaceDer[d] *= fact; 1809 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0 1810 } 1811 } 1812 if (xx[gidx] == 1e10) { 1813 for (PetscInt d = 0; d < dim; ++d) { 1814 for (PetscInt e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e]; 1815 } 1816 xx[gidx] = eGradPhi[0]; 1817 yy[gidx] = eGradPhi[1]; 1818 if (dim == 3) zz[gidx] = eGradPhi[2]; 1819 } 1820 } else { 1821 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1822 yy[gidx] = vj[qj * dim + 1]; 1823 if (dim == 3) zz[gidx] = vj[qj * dim + 2]; 1824 } 1825 } /* q */ 1826 if (ctx->use_energy_tensor_trick) PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs)); 1827 } /* ej */ 1828 if (ctx->use_energy_tensor_trick) { 1829 PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2)); 1830 PetscCall(DMDestroy(&dmEnergy)); 1831 } 1832 } /* grid */ 1833 if (ctx->use_energy_tensor_trick) PetscCall(PetscFEDestroy(&fe)); 1834 /* cache static data */ 1835 if (ctx->deviceType == LANDAU_KOKKOS) { 1836 #if defined(PETSC_HAVE_KOKKOS) 1837 PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, Nb, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, (PetscReal *)ctx->lambdas, invJ_a, xx, yy, zz, ww, &ctx->SData_d)); 1838 /* free */ 1839 PetscCall(PetscFree4(ww, xx, yy, invJ_a)); 1840 if (dim == 3) PetscCall(PetscFree(zz)); 1841 #else 1842 SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built"); 1843 #endif 1844 } else { /* CPU version, just copy in, only use part */ 1845 PetscReal *nu_alpha_p = (PetscReal *)ctx->SData_d.alpha, *nu_beta_p = (PetscReal *)ctx->SData_d.beta, *invMass_p = (PetscReal *)ctx->SData_d.invMass, *lambdas_p = NULL; // why set these ? 
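      /* CPU path: the static integration-point data (quadrature weights ww, coordinates xx/yy/zz, inverse Jacobians
         invJ_a) is kept as host pointers in ctx->SData_d, while the per-species coefficients (nu_alpha, nu_beta,
         invMass) and the grid-pair lambdas are copied into freshly allocated host arrays below */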
1846 ctx->SData_d.w = (void *)ww; 1847 ctx->SData_d.x = (void *)xx; 1848 ctx->SData_d.y = (void *)yy; 1849 ctx->SData_d.z = (void *)zz; 1850 ctx->SData_d.invJ = (void *)invJ_a; 1851 PetscCall(PetscMalloc4(ctx->num_species, &nu_alpha_p, ctx->num_species, &nu_beta_p, ctx->num_species, &invMass_p, LANDAU_MAX_GRIDS * LANDAU_MAX_GRIDS, &lambdas_p)); 1852 for (PetscInt ii = 0; ii < ctx->num_species; ii++) { 1853 nu_alpha_p[ii] = nu_alpha[ii]; 1854 nu_beta_p[ii] = nu_beta[ii]; 1855 invMass_p[ii] = invMass[ii]; 1856 } 1857 ctx->SData_d.alpha = (void *)nu_alpha_p; 1858 ctx->SData_d.beta = (void *)nu_beta_p; 1859 ctx->SData_d.invMass = (void *)invMass_p; 1860 ctx->SData_d.lambdas = (void *)lambdas_p; 1861 for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) { 1862 PetscReal (*lambdas)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS] = (PetscReal (*)[LANDAU_MAX_GRIDS][LANDAU_MAX_GRIDS])ctx->SData_d.lambdas; 1863 for (PetscInt gridj = 0; gridj < LANDAU_MAX_GRIDS; gridj++) (*lambdas)[grid][gridj] = ctx->lambdas[grid][gridj]; 1864 } 1865 } 1866 PetscCall(PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0)); 1867 } // initialize 1868 PetscFunctionReturn(PETSC_SUCCESS); 1869 } 1870 1871 /* < v, u > */ 1872 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1873 { 1874 g0[0] = 1.; 1875 } 1876 1877 /* < v, u > */ 1878 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1879 { 1880 static double ttt = 1e-12; 1881 g0[0] = ttt++; 1882 } 1883 1884 /* < v, u > */ 1885 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1886 { 1887 g0[0] = 2. * PETSC_PI * x[0]; 1888 } 1889 1890 /* 1891 LandauCreateJacobianMatrix - creates ctx->J with without real data. Hard to keep sparse. 1892 - Like DMPlexLandauCreateMassMatrix. 
Should remove one and combine 1893 - has old support for field major ordering 1894 */ 1895 static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx) 1896 { 1897 PetscInt *idxs = NULL; 1898 Mat subM[LANDAU_MAX_GRIDS]; 1899 1900 PetscFunctionBegin; 1901 if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1902 PetscFunctionReturn(PETSC_SUCCESS); 1903 } 1904 // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used 1905 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs)); 1906 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1907 const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid]; 1908 Mat gMat; 1909 DM massDM; 1910 PetscDS prob; 1911 Vec tvec; 1912 // get "mass" matrix for reordering 1913 PetscCall(DMClone(ctx->plex[grid], &massDM)); 1914 PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM)); 1915 PetscCall(DMCreateDS(massDM)); 1916 PetscCall(DMGetDS(massDM, &prob)); 1917 for (PetscInt ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL)); 1918 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); // this trick is need to both sparsify the matrix and avoid runtime error 1919 PetscCall(DMCreateMatrix(massDM, &gMat)); 1920 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 1921 PetscCall(MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 1922 PetscCall(MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 1923 PetscCall(DMCreateLocalVector(ctx->plex[grid], &tvec)); 1924 PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx)); 1925 PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view")); 1926 PetscCall(DMDestroy(&massDM)); 1927 PetscCall(VecDestroy(&tvec)); 1928 subM[grid] = gMat; 1929 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1930 MatOrderingType rtype = MATORDERINGRCM; 1931 IS isrow, isicol; 1932 PetscCall(MatGetOrdering(gMat, rtype, &isrow, &isicol)); 1933 PetscCall(ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid])); 1934 PetscCall(ISGetIndices(isrow, &values)); 1935 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 1936 #if !defined(LANDAU_SPECIES_MAJOR) 1937 PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N; 1938 for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0; 1939 #else 1940 PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n; 1941 for (PetscInt ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0; 1942 #endif 1943 } 1944 PetscCall(ISRestoreIndices(isrow, &values)); 1945 PetscCall(ISDestroy(&isrow)); 1946 PetscCall(ISDestroy(&isicol)); 1947 } 1948 } 1949 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscCall(ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is)); 1950 // get a block matrix 1951 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 1952 Mat B = subM[grid]; 1953 PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024; 1954 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 1955 PetscCall(MatGetSize(B, &nloc, NULL)); 1956 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 
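      /* For each batch entry, insert this grid's sub-matrix B into the batched global matrix ctx->J row by row,
         shifting row and column indices by the batch/grid offset from LAND_MOFFSET (for the default, non-species-major
         layout this appears to be b_id * mat_offset[num_grids] + mat_offset[grid], as in the idxs computation above);
         colbuf is re-allocated if a row has more nonzeros than the current buffer */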
1957 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 1958 const PetscInt *cols; 1959 const PetscScalar *vals; 1960 for (PetscInt i = 0; i < nloc; i++) { 1961 PetscCall(MatGetRow(B, i, &nzl, NULL, NULL)); 1962 if (nzl > COL_BF_SIZE) { 1963 PetscCall(PetscFree(colbuf)); 1964 PetscCall(PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, 2 * COL_BF_SIZE, nzl)); 1965 COL_BF_SIZE = nzl; 1966 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 1967 } 1968 PetscCall(MatGetRow(B, i, &nzl, &cols, &vals)); 1969 for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset; 1970 row = i + moffset; 1971 PetscCall(MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES)); 1972 PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals)); 1973 } 1974 } 1975 PetscCall(PetscFree(colbuf)); 1976 } 1977 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid])); 1978 PetscCall(MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY)); 1979 PetscCall(MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY)); 1980 1981 // debug 1982 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view")); 1983 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1984 Mat mat_block_order; 1985 PetscCall(MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order)); // use MatPermute 1986 PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view")); 1987 PetscCall(MatDestroy(&mat_block_order)); 1988 PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch)); 1989 PetscCall(VecDuplicate(X, &ctx->work_vec)); 1990 } 1991 PetscFunctionReturn(PETSC_SUCCESS); 1992 } 1993 1994 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat); 1995 /*@C 1996 DMPlexLandauCreateVelocitySpace - Create a `DMPLEX` velocity space mesh 1997 1998 Collective 1999 2000 Input Parameters: 2001 + comm - The MPI communicator 2002 . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver) 2003 - prefix - prefix for options (not tested) 2004 2005 Output Parameters: 2006 + pack - The `DM` object representing the mesh 2007 . 
X - A vector (user destroys) 2008 - J - Optional matrix (object destroys) 2009 2010 Level: beginner 2011 2012 .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()` 2013 @*/ 2014 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack) 2015 { 2016 LandauCtx *ctx; 2017 Vec Xsub[LANDAU_MAX_GRIDS]; 2018 IS grid_batch_is_inv[LANDAU_MAX_GRIDS]; 2019 2020 PetscFunctionBegin; 2021 PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported"); 2022 PetscCheck(LANDAU_DIM == dim, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d", dim, LANDAU_DIM); 2023 PetscCall(PetscNew(&ctx)); 2024 ctx->comm = comm; /* used for diagnostics and global errors */ 2025 /* process options */ 2026 PetscCall(ProcessOptions(ctx, prefix)); 2027 if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE; 2028 /* Create Mesh */ 2029 PetscCall(DMCompositeCreate(PETSC_COMM_SELF, pack)); 2030 PetscCall(PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0)); 2031 PetscCall(PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0)); 2032 PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR) 2033 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2034 /* create FEM */ 2035 PetscCall(SetupDS(ctx->plex[grid], dim, grid, ctx)); 2036 /* set initial state */ 2037 PetscCall(DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid])); 2038 PetscCall(PetscObjectSetName((PetscObject)Xsub[grid], "u_orig")); 2039 /* initial static refinement, no solve */ 2040 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx)); 2041 /* forest refinement - forest goes in (if forest), plex comes out */ 2042 if (ctx->use_p4est) { 2043 DM plex; 2044 PetscCall(adapt(grid, ctx, &Xsub[grid])); // forest goes in, plex comes out 2045 if (grid == 0) { 2046 PetscCall(DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view")); // need to differentiate - todo 2047 PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view")); 2048 } 2049 // convert to plex, all done with this level 2050 PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex)); 2051 PetscCall(DMDestroy(&ctx->plex[grid])); 2052 ctx->plex[grid] = plex; 2053 } 2054 #if !defined(LANDAU_SPECIES_MAJOR) 2055 PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2056 #else 2057 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 2058 PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2059 } 2060 #endif 2061 PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx)); 2062 } 2063 #if !defined(LANDAU_SPECIES_MAJOR) 2064 // stack the batched DMs, could do it all here!!! 
b_id=0 2065 for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) { 2066 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(*pack, ctx->plex[grid])); 2067 } 2068 #endif 2069 // create ctx->mat_offset 2070 ctx->mat_offset[0] = 0; 2071 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2072 PetscInt n; 2073 PetscCall(VecGetLocalSize(Xsub[grid], &n)); 2074 ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n; 2075 } 2076 // creat DM & Jac 2077 PetscCall(DMSetApplicationContext(*pack, ctx)); 2078 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); 2079 PetscCall(DMCreateMatrix(*pack, &ctx->J)); 2080 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 2081 PetscCall(MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2082 PetscCall(MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 2083 PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac")); 2084 // construct initial conditions in X 2085 PetscCall(DMCreateGlobalVector(*pack, X)); 2086 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2087 PetscInt n; 2088 PetscCall(VecGetLocalSize(Xsub[grid], &n)); 2089 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2090 PetscScalar const *values; 2091 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2092 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx)); 2093 PetscCall(VecGetArrayRead(Xsub[grid], &values)); // Drop whole grid in Plex ordering 2094 for (PetscInt i = 0, idx = moffset; i < n; i++, idx++) PetscCall(VecSetValue(*X, idx, values[i], INSERT_VALUES)); 2095 PetscCall(VecRestoreArrayRead(Xsub[grid], &values)); 2096 } 2097 } 2098 // cleanup 2099 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(VecDestroy(&Xsub[grid])); 2100 /* check for correct matrix type */ 2101 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 2102 PetscBool flg; 2103 if (ctx->deviceType == LANDAU_KOKKOS) { 2104 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, "")); 2105 #if defined(PETSC_HAVE_KOKKOS) 2106 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2107 #else 2108 PetscCheck(flg, ctx->comm, PETSC_ERR_ARG_WRONG, "must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2109 #endif 2110 } 2111 } 2112 PetscCall(PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0)); 2113 2114 // create field major ordering 2115 ctx->work_vec = NULL; 2116 ctx->plex_batch = NULL; 2117 ctx->batch_is = NULL; 2118 for (PetscInt i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL; 2119 PetscCall(PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0)); 2120 PetscCall(LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx)); 2121 PetscCall(PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0)); 2122 2123 // create AMR GPU assembly maps and static GPU data 2124 PetscCall(CreateStaticData(dim, grid_batch_is_inv, ctx)); 2125 2126 PetscCall(PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0)); 2127 2128 // create mass matrix 2129 PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL)); 2130 2131 if (J) *J = ctx->J; 2132 2133 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2134 PetscContainer container; 2135 // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc 
-dm_landau_jacobian_field_major_order 2136 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2137 PetscCall(PetscContainerSetPointer(container, (void *)ctx)); 2138 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container)); 2139 PetscCall(PetscContainerDestroy(&container)); 2140 // batch solvers need to map -- can batch solvers work 2141 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2142 PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch)); 2143 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container)); 2144 PetscCall(PetscContainerDestroy(&container)); 2145 } 2146 // for batch solvers 2147 { 2148 PetscContainer container; 2149 PetscInt *pNf; 2150 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2151 PetscCall(PetscMalloc1(sizeof(*pNf), &pNf)); 2152 *pNf = ctx->batch_sz; 2153 PetscCall(PetscContainerSetPointer(container, (void *)pNf)); 2154 PetscCall(PetscContainerSetCtxDestroy(container, PetscCtxDestroyDefault)); 2155 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container)); 2156 PetscCall(PetscContainerDestroy(&container)); 2157 } 2158 PetscFunctionReturn(PETSC_SUCCESS); 2159 } 2160 2161 /*@C 2162 DMPlexLandauAccess - Access to the distribution function with user callback 2163 2164 Collective 2165 2166 Input Parameters: 2167 + pack - the `DMCOMPOSITE` 2168 . func - call back function 2169 - user_ctx - user context 2170 2171 Input/Output Parameter: 2172 . X - Vector to data to 2173 2174 Level: advanced 2175 2176 .seealso: `DMPlexLandauCreateVelocitySpace()` 2177 @*/ 2178 PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx) 2179 { 2180 LandauCtx *ctx; 2181 2182 PetscFunctionBegin; 2183 PetscCall(DMGetApplicationContext(pack, &ctx)); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset 2184 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2185 PetscInt dim, n; 2186 PetscCall(DMGetDimension(pack, &dim)); 2187 for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) { 2188 Vec vec; 2189 PetscInt vf[1] = {i0}; 2190 IS vis; 2191 DM vdm; 2192 PetscCall(DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm)); 2193 PetscCall(DMSetApplicationContext(vdm, ctx)); // the user might want this 2194 PetscCall(DMCreateGlobalVector(vdm, &vec)); 2195 PetscCall(VecGetSize(vec, &n)); 2196 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2197 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2198 PetscCall(VecZeroEntries(vec)); 2199 /* Add your data with 'dm' for species 'sp' to 'vec' */ 2200 PetscCall(func(vdm, vec, i0, grid, b_id, user_ctx)); 2201 /* add to global */ 2202 PetscScalar const *values; 2203 const PetscInt *offsets; 2204 PetscCall(VecGetArrayRead(vec, &values)); 2205 PetscCall(ISGetIndices(vis, &offsets)); 2206 for (PetscInt i = 0; i < n; i++) PetscCall(VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES)); 2207 PetscCall(VecRestoreArrayRead(vec, &values)); 2208 PetscCall(ISRestoreIndices(vis, &offsets)); 2209 } // batch 2210 PetscCall(VecDestroy(&vec)); 2211 PetscCall(ISDestroy(&vis)); 2212 PetscCall(DMDestroy(&vdm)); 2213 } 2214 } // grid 2215 PetscFunctionReturn(PETSC_SUCCESS); 2216 } 2217 2218 /*@ 2219 DMPlexLandauDestroyVelocitySpace - Destroy a `DMPLEX` velocity space mesh 2220 2221 Collective 2222 2223 Input/Output Parameters: 
2224 . dm - the `DM` to destroy 2225 2226 Level: beginner 2227 2228 .seealso: `DMPlexLandauCreateVelocitySpace()` 2229 @*/ 2230 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm) 2231 { 2232 LandauCtx *ctx; 2233 2234 PetscFunctionBegin; 2235 PetscCall(DMGetApplicationContext(*dm, &ctx)); 2236 PetscCall(MatDestroy(&ctx->M)); 2237 PetscCall(MatDestroy(&ctx->J)); 2238 for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii])); 2239 PetscCall(ISDestroy(&ctx->batch_is)); 2240 PetscCall(VecDestroy(&ctx->work_vec)); 2241 PetscCall(VecScatterDestroy(&ctx->plex_batch)); 2242 if (ctx->deviceType == LANDAU_KOKKOS) { 2243 #if defined(PETSC_HAVE_KOKKOS) 2244 PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d)); 2245 #else 2246 SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos"); 2247 #endif 2248 } else { 2249 if (ctx->SData_d.x) { /* in a CPU run */ 2250 PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w; 2251 LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQND + 1] = (LandauIdx(*)[LANDAU_MAX_NQND + 1]) ctx->SData_d.coo_elem_point_offsets; 2252 PetscCall(PetscFree4(ww, xx, yy, invJ)); 2253 if (zz) PetscCall(PetscFree(zz)); 2254 if (coo_elem_offsets) PetscCall(PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets)); // could be NULL 2255 PetscCall(PetscFree4(ctx->SData_d.alpha, ctx->SData_d.beta, ctx->SData_d.invMass, ctx->SData_d.lambdas)); 2256 } 2257 } 2258 2259 if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings 2260 PetscCall(PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE])); 2261 PetscCall(PetscPrintf(ctx->comm, "2: Solve: %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz)); 2262 PetscCall(PetscPrintf(ctx->comm, "3: Landau: %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL])); 2263 PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN])); 2264 PetscCall(PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n", ctx->times[LANDAU_OPERATOR])); 2265 PetscCall(PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n", ctx->times[LANDAU_MASS])); 2266 PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n", ctx->times[LANDAU_F_DF])); 2267 PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n", ctx->times[LANDAU_KERNEL])); 2268 PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n", ctx->times[KSP_FACTOR])); 2269 PetscCall(PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n", ctx->times[KSP_SOLVE])); 2270 } 2271 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMDestroy(&ctx->plex[grid])); 2272 PetscCall(PetscFree(ctx)); 2273 PetscCall(DMDestroy(dm)); 2274 PetscFunctionReturn(PETSC_SUCCESS); 2275 } 2276 2277 /* < v, ru > */ 2278 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar 
*f0) 2279 { 2280 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2281 f0[0] = u[ii]; 2282 } 2283 2284 /* < v, ru > */ 2285 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2286 { 2287 PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]); 2288 f0[0] = x[jj] * u[ii]; /* x momentum */ 2289 } 2290 2291 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2292 { 2293 PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]); 2294 double tmp1 = 0.; 2295 for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i]; 2296 f0[0] = tmp1 * u[ii]; 2297 } 2298 2299 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) 2300 { 2301 const PetscReal *c2_0_arr = ((PetscReal *)actx); 2302 const PetscReal c02 = c2_0_arr[0]; 2303 2304 PetscFunctionBegin; 2305 for (PetscInt s = 0; s < Nf; s++) { 2306 PetscReal tmp1 = 0.; 2307 for (PetscInt i = 0; i < dim; ++i) tmp1 += x[i] * x[i]; 2308 #if defined(PETSC_USE_DEBUG) 2309 u[s] = PetscSqrtReal(1. + tmp1 / c02); // u[0] = PetscSqrtReal(1. + xx); 2310 #else 2311 { 2312 PetscReal xx = tmp1 / c02; 2313 u[s] = xx / (PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.) 2314 } 2315 #endif 2316 } 2317 PetscFunctionReturn(PETSC_SUCCESS); 2318 } 2319 2320 /* < v, ru > */ 2321 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2322 { 2323 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2324 f0[0] = 2. * PETSC_PI * x[0] * u[ii]; 2325 } 2326 2327 /* < v, ru > */ 2328 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2329 { 2330 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2331 f0[0] = 2. 
* PETSC_PI * x[0] * x[1] * u[ii]; 2332 } 2333 2334 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2335 { 2336 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2337 f0[0] = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii]; 2338 } 2339 2340 /*@ 2341 DMPlexLandauPrintNorms - collects moments and prints them 2342 2343 Collective 2344 2345 Input Parameters: 2346 + X - the state 2347 - stepi - current step to print 2348 2349 Level: beginner 2350 2351 .seealso: `DMPlexLandauCreateVelocitySpace()` 2352 @*/ 2353 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi) 2354 { 2355 LandauCtx *ctx; 2356 PetscDS prob; 2357 DM pack; 2358 PetscInt cStart, cEnd, dim, ii, i0, nDMs; 2359 PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES]; 2360 PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES]; 2361 Vec *globXArray; 2362 2363 PetscFunctionBegin; 2364 PetscCall(VecGetDM(X, &pack)); 2365 PetscCheck(pack, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM"); 2366 PetscCall(DMGetDimension(pack, &dim)); 2367 PetscCheck(dim == 2 || dim == 3, PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]", dim); 2368 PetscCall(DMGetApplicationContext(pack, &ctx)); 2369 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2370 /* print momentum and energy */ 2371 PetscCall(DMCompositeGetNumberDM(pack, &nDMs)); 2372 PetscCheck(nDMs == ctx->num_grids * ctx->batch_sz, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT, nDMs, ctx->num_grids * ctx->batch_sz); 2373 PetscCall(PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray)); 2374 PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); 2375 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2376 Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)]; 2377 PetscCall(DMGetDS(ctx->plex[grid], &prob)); 2378 for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) { 2379 PetscScalar user[2] = {(PetscScalar)i0, ctx->charges[ii]}; 2380 PetscCall(PetscDSSetConstants(prob, 2, user)); 2381 if (dim == 2) { /* 2/3X + 3V (cylindrical coordinates) */ 2382 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden)); 2383 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2384 density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii]; 2385 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom)); 2386 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2387 zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2388 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2)); 2389 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2390 energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii]; 2391 zmomentumtot += zmomentum[ii]; 2392 energytot += energy[ii]; 2393 densitytot += density[ii]; 2394 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, 
(double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]))); 2395 } else { /* 2/3Xloc + 3V */ 2396 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den)); 2397 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2398 density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii]; 2399 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom)); 2400 user[1] = 0; 2401 PetscCall(PetscDSSetConstants(prob, 2, user)); 2402 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2403 xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2404 user[1] = 1; 2405 PetscCall(PetscDSSetConstants(prob, 2, user)); 2406 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2407 ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2408 user[1] = 2; 2409 PetscCall(PetscDSSetConstants(prob, 2, user)); 2410 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2411 zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii]; 2412 if (ctx->use_relativistic_corrections) { 2413 /* gamma * M * f */ 2414 if (ii == 0 && grid == 0) { // do all at once 2415 Vec Mf, globGamma, *globMfArray, *globGammaArray; 2416 PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f}; 2417 PetscReal *c2_0[1], data[1]; 2418 2419 PetscCall(VecDuplicate(X, &globGamma)); 2420 PetscCall(VecDuplicate(X, &Mf)); 2421 PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray)); 2422 PetscCall(PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray)); 2423 /* M * f */ 2424 PetscCall(MatMult(ctx->M, X, Mf)); 2425 /* gamma */ 2426 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2427 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching 2428 Vec v1 = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)]; 2429 data[0] = PetscSqr(C_0(ctx->v_0)); 2430 c2_0[0] = &data[0]; 2431 PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1)); 2432 } 2433 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2434 /* gamma * Mf */ 2435 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2436 PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2437 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // yes a grid loop in a grid loop to print nice 2438 PetscInt Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs; 2439 Vec Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2; 2440 // get each component 2441 PetscCall(VecGetSize(Mfsub, &N)); 2442 PetscCall(VecCreate(ctx->comm, &v1)); 2443 PetscCall(VecSetSizes(v1, PETSC_DECIDE, N / Nf)); 2444 PetscCall(VecCreate(ctx->comm, &v2)); 2445 PetscCall(VecSetSizes(v2, PETSC_DECIDE, N / Nf)); 2446 PetscCall(VecSetFromOptions(v1)); // ??? 
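/* v1 and v2 are scratch vectors of length N/Nf; below, each species component i of gamma (Gsub) and of M*f (Mfsub) is gathered with VecStrideGather and their dot product gives the per-species relativistic energy: energy[ix] = (gamma, M f) * n_0 * v_0^2 * masses[ix] */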
2447 PetscCall(VecSetFromOptions(v2)); 2448 // get each component 2449 PetscCall(VecGetBlockSize(Gsub, &bs)); 2450 PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub", bs, Nf); 2451 PetscCall(VecGetBlockSize(Mfsub, &bs)); 2452 PetscCheck(bs == Nf, PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT, bs, Nf); 2453 for (PetscInt i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) { 2454 PetscScalar val; 2455 PetscCall(VecStrideGather(Gsub, i, v1, INSERT_VALUES)); // this is not right -- TODO 2456 PetscCall(VecStrideGather(Mfsub, i, v2, INSERT_VALUES)); 2457 PetscCall(VecDot(v1, v2, &val)); 2458 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix]; 2459 } 2460 PetscCall(VecDestroy(&v1)); 2461 PetscCall(VecDestroy(&v2)); 2462 } /* grids */ 2463 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2464 PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2465 PetscCall(PetscFree(globGammaArray)); 2466 PetscCall(PetscFree(globMfArray)); 2467 PetscCall(VecDestroy(&globGamma)); 2468 PetscCall(VecDestroy(&Mf)); 2469 } 2470 } else { 2471 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2)); 2472 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx)); 2473 energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii]; 2474 } 2475 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]))); 2476 xmomentumtot += xmomentum[ii]; 2477 ymomentumtot += ymomentum[ii]; 2478 zmomentumtot += zmomentum[ii]; 2479 energytot += energy[ii]; 2480 densitytot += density[ii]; 2481 } 2482 if (ctx->num_species > 1) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 2483 } 2484 } 2485 PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray)); 2486 PetscCall(PetscFree(globXArray)); 2487 /* totals */ 2488 PetscCall(DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd)); 2489 if (ctx->num_species > 1) { 2490 if (dim == 2) { 2491 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot), 2492 (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart)); 2493 } else { 2494 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot), 2495 (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart)); 2496 } 2497 } else PetscCall(PetscPrintf(PETSC_COMM_WORLD, " -- %" PetscInt_FMT " cells", cEnd - cStart)); 2498 PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\n")); 2499 PetscFunctionReturn(PETSC_SUCCESS); 2500 } 2501 2502 /*@ 2503 DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order 
of the Jacobian); 2504 the mass matrix is stored in `ctx->M` 2505 2506 Collective 2507 2508 Input Parameter: 2509 . pack - the `DM` object 2510 2511 Output Parameter: 2512 . Amat - The mass matrix (optional); the matrix is also stored in the Landau context (`ctx->M`) 2513 2514 Level: beginner 2515 2516 .seealso: `DMPlexLandauCreateVelocitySpace()` 2517 @*/ 2518 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat) 2519 { 2520 DM mass_pack, massDM[LANDAU_MAX_GRIDS]; 2521 PetscDS prob; 2522 PetscInt ii, dim, N1 = 1, N2; 2523 LandauCtx *ctx; 2524 Mat packM, subM[LANDAU_MAX_GRIDS]; 2525 2526 PetscFunctionBegin; 2527 PetscValidHeaderSpecific(pack, DM_CLASSID, 1); 2528 if (Amat) PetscAssertPointer(Amat, 2); 2529 PetscCall(DMGetApplicationContext(pack, &ctx)); 2530 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2531 PetscCall(PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0)); 2532 PetscCall(DMGetDimension(pack, &dim)); 2533 PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack)); 2534 /* create pack mass matrix */ 2535 for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) { 2536 PetscCall(DMClone(ctx->plex[grid], &massDM[grid])); 2537 PetscCall(DMCopyFields(ctx->plex[grid], PETSC_DETERMINE, PETSC_DETERMINE, massDM[grid])); 2538 PetscCall(DMCreateDS(massDM[grid])); 2539 PetscCall(DMGetDS(massDM[grid], &prob)); 2540 for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) { 2541 if (dim == 3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL)); 2542 else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL)); 2543 } 2544 #if !defined(LANDAU_SPECIES_MAJOR) 2545 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2546 #else 2547 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid 2548 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2549 } 2550 #endif 2551 PetscCall(DMCreateMatrix(massDM[grid], &subM[grid])); 2552 } 2553 #if !defined(LANDAU_SPECIES_MAJOR) 2554 // stack the batched DMs 2555 for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) { 2556 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2557 } 2558 #endif 2559 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only")); 2560 PetscCall(DMCreateMatrix(mass_pack, &packM)); 2561 PetscCall(PetscOptionsInsertString(NULL, "-dm_preallocate_only false")); 2562 PetscCall(MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2563 PetscCall(MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE)); 2564 PetscCall(DMDestroy(&mass_pack)); 2565 /* make mass matrix for each block */ 2566 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2567 Vec locX; 2568 DM plex = massDM[grid]; 2569 PetscCall(DMGetLocalVector(plex, &locX)); 2570 /* Mass matrix is independent of the input, so no need to fill locX */ 2571 PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx)); 2572 PetscCall(DMRestoreLocalVector(plex, &locX)); 2573 PetscCall(DMDestroy(&massDM[grid])); 2574 } 2575 PetscCall(MatGetSize(ctx->J, &N1, NULL)); 2576 PetscCall(MatGetSize(packM, &N2, NULL)); 2577 PetscCheck(N1 == N2, PetscObjectComm((PetscObject)pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT, N1, N2); 2578 /* assemble block diagonals */ 2579 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { 2580 Mat B = subM[grid]; 2581 PetscInt nloc, nzl, *colbuf,
COL_BF_SIZE = 1024, row; 2582 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 2583 PetscCall(MatGetSize(B, &nloc, NULL)); 2584 for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { 2585 const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); 2586 const PetscInt *cols; 2587 const PetscScalar *vals; 2588 for (PetscInt i = 0; i < nloc; i++) { 2589 PetscCall(MatGetRow(B, i, &nzl, NULL, NULL)); 2590 if (nzl > COL_BF_SIZE) { 2591 PetscCall(PetscFree(colbuf)); 2592 PetscCall(PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, 2 * COL_BF_SIZE, nzl)); 2593 COL_BF_SIZE = nzl; 2594 PetscCall(PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf)); 2595 } 2596 PetscCall(MatGetRow(B, i, &nzl, &cols, &vals)); 2597 for (PetscInt j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset; 2598 row = i + moffset; 2599 PetscCall(MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES)); 2600 PetscCall(MatRestoreRow(B, i, &nzl, &cols, &vals)); 2601 } 2602 } 2603 PetscCall(PetscFree(colbuf)); 2604 } 2605 // cleanup 2606 for (PetscInt grid = 0; grid < ctx->num_grids; grid++) PetscCall(MatDestroy(&subM[grid])); 2607 PetscCall(MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY)); 2608 PetscCall(MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY)); 2609 PetscCall(PetscObjectSetName((PetscObject)packM, "mass")); 2610 PetscCall(MatViewFromOptions(packM, NULL, "-dm_landau_mass_view")); 2611 ctx->M = packM; 2612 if (Amat) *Amat = packM; 2613 PetscCall(PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0)); 2614 PetscFunctionReturn(PETSC_SUCCESS); 2615 } 2616 2617 /*@ 2618 DMPlexLandauIFunction - `TS` residual calculation, confusingly this computes the Jacobian w/o mass 2619 2620 Collective 2621 2622 Input Parameters: 2623 + ts - The time stepping context 2624 . time_dummy - current time (not used) 2625 . X - Current state 2626 . X_t - Time derivative of current state 2627 - actx - Landau context 2628 2629 Output Parameter: 2630 . 
F - The residual 2631 2632 Level: beginner 2633 2634 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()` 2635 @*/ 2636 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx) 2637 { 2638 LandauCtx *ctx = (LandauCtx *)actx; 2639 PetscInt dim; 2640 DM pack; 2641 #if defined(PETSC_HAVE_THREADSAFETY) 2642 double starttime, endtime; 2643 #endif 2644 PetscObjectState state; 2645 2646 PetscFunctionBegin; 2647 PetscCall(TSGetDM(ts, &pack)); 2648 PetscCall(DMGetApplicationContext(pack, &ctx)); 2649 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2650 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2651 PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0)); 2652 PetscCall(PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0)); 2653 #if defined(PETSC_HAVE_THREADSAFETY) 2654 starttime = MPI_Wtime(); 2655 #endif 2656 PetscCall(DMGetDimension(pack, &dim)); 2657 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2658 if (state != ctx->norm_state) { 2659 PetscCall(MatZeroEntries(ctx->J)); 2660 PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx)); 2661 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view")); 2662 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2663 ctx->norm_state = state; 2664 } else { 2665 PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state)); 2666 } 2667 /* mat vec for op */ 2668 PetscCall(MatMult(ctx->J, X, F)); /* C*f */ 2669 /* add time term */ 2670 if (X_t) PetscCall(MatMultAdd(ctx->M, X_t, F, F)); 2671 #if defined(PETSC_HAVE_THREADSAFETY) 2672 if (ctx->stage) { 2673 endtime = MPI_Wtime(); 2674 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2675 ctx->times[LANDAU_JACOBIAN] += (endtime - starttime); 2676 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2677 ctx->times[LANDAU_JACOBIAN_COUNT] += 1; 2678 } 2679 #endif 2680 PetscCall(PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0)); 2681 PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0)); 2682 if (ctx->stage) PetscCall(PetscLogStagePop()); 2683 PetscFunctionReturn(PETSC_SUCCESS); 2684 } 2685 2686 /*@ 2687 DMPlexLandauIJacobian - `TS` Jacobian construction, confusingly this adds mass 2688 2689 Collective 2690 2691 Input Parameters: 2692 + ts - The time stepping context 2693 . time_dummy - current time (not used) 2694 . X - Current state 2695 . U_tdummy - Time derivative of current state (not used) 2696 . 
shift - shift for du/dt term 2697 - actx - Landau context 2698 2699 Output Parameters: 2700 + Amat - Jacobian 2701 - Pmat - same as Amat 2702 2703 Level: beginner 2704 2705 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()` 2706 @*/ 2707 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx) 2708 { 2709 LandauCtx *ctx = NULL; 2710 PetscInt dim; 2711 DM pack; 2712 #if defined(PETSC_HAVE_THREADSAFETY) 2713 double starttime, endtime; 2714 #endif 2715 PetscObjectState state; 2716 2717 PetscFunctionBegin; 2718 PetscCall(TSGetDM(ts, &pack)); 2719 PetscCall(DMGetApplicationContext(pack, &ctx)); 2720 PetscCheck(ctx, PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2721 PetscCheck(Amat == Pmat && Amat == ctx->J, ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J"); 2722 PetscCall(DMGetDimension(pack, &dim)); 2723 /* get collision Jacobian into A */ 2724 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2725 PetscCall(PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0)); 2726 PetscCall(PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0)); 2727 #if defined(PETSC_HAVE_THREADSAFETY) 2728 starttime = MPI_Wtime(); 2729 #endif 2730 PetscCheck(shift != 0.0, ctx->comm, PETSC_ERR_PLIB, "zero shift"); 2731 PetscCall(PetscObjectStateGet((PetscObject)ctx->J, &state)); 2732 PetscCheck(state == ctx->norm_state, ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT, ctx->norm_state, state); 2733 if (!ctx->use_matrix_mass) { 2734 PetscCall(LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx)); 2735 } else { /* add mass */ 2736 PetscCall(MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN)); 2737 } 2738 #if defined(PETSC_HAVE_THREADSAFETY) 2739 if (ctx->stage) { 2740 endtime = MPI_Wtime(); 2741 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2742 ctx->times[LANDAU_MASS] += (endtime - starttime); 2743 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2744 } 2745 #endif 2746 PetscCall(PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0)); 2747 PetscCall(PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0)); 2748 if (ctx->stage) PetscCall(PetscLogStagePop()); 2749 PetscFunctionReturn(PETSC_SUCCESS); 2750 } 2751
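/*
  Example driver (illustrative sketch only): how the public routines above are typically wired into a TS
  integrator. The signature of DMPlexLandauCreateVelocitySpace() is assumed here to be
  (comm, dim, options prefix, &X, &J, &pack), and AddSeed() is a hypothetical DMPlexLandauAccess()
  callback; everything else uses routines defined or referenced in this file. Note that
  DMPlexLandauIJacobian() requires Amat == Pmat == ctx->J, so the Jacobian created with the velocity
  space is passed for both matrices, and DMPlexLandauDestroyVelocitySpace() frees both J and the mass matrix.

  static PetscErrorCode AddSeed(DM vdm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user_ctx)
  {
    PetscFunctionBegin;
    PetscCall(VecSet(vec, 1.e-4)); // hypothetical: constant perturbation for this species/grid/batch member
    PetscFunctionReturn(PETSC_SUCCESS);
  }

  // inside main(), after PetscInitialize():
  DM  pack;
  Vec X;
  Mat J;
  TS  ts;
  PetscCall(DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack)); // assumed signature
  PetscCall(DMPlexLandauCreateMassMatrix(pack, NULL));                               // mass matrix kept in the Landau context
  PetscCall(DMPlexLandauAccess(pack, X, AddSeed, NULL));                             // optionally modify the initial state
  PetscCall(TSCreate(PETSC_COMM_SELF, &ts));
  PetscCall(TSSetDM(ts, pack));
  PetscCall(TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL));
  PetscCall(TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL));
  PetscCall(TSSetFromOptions(ts));
  PetscCall(TSSolve(ts, X));
  PetscCall(DMPlexLandauPrintNorms(X, 0));
  PetscCall(TSDestroy(&ts));
  PetscCall(VecDestroy(&X));
  PetscCall(DMPlexLandauDestroyVelocitySpace(&pack));
*/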