1 #include <../src/mat/impls/aij/seq/aij.h> 2 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 3 #include <petsclandau.h> /*I "petsclandau.h" I*/ 4 #include <petscts.h> 5 #include <petscdmforest.h> 6 #include <petscdmcomposite.h> 7 8 /* Landau collision operator */ 9 10 /* relativistic terms */ 11 #if defined(PETSC_USE_REAL_SINGLE) 12 #define SPEED_OF_LIGHT 2.99792458e8F 13 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */ 14 #else 15 #define SPEED_OF_LIGHT 2.99792458e8 16 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */ 17 #endif 18 19 #define PETSC_THREAD_SYNC 20 #include "land_tensors.h" 21 22 #if defined(PETSC_HAVE_OPENMP) 23 #include <omp.h> 24 #endif 25 26 static PetscErrorCode LandauGPUMapsDestroy(void *ptr) 27 { 28 P4estVertexMaps *maps = (P4estVertexMaps*)ptr; 29 PetscFunctionBegin; 30 // free device data 31 if (maps[0].deviceType != LANDAU_CPU) { 32 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 33 if (maps[0].deviceType == LANDAU_KOKKOS) { 34 PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // imples Kokkos does 35 } // else could be CUDA 36 #elif defined(PETSC_HAVE_CUDA) 37 if (maps[0].deviceType == LANDAU_CUDA) { 38 PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids)); 39 } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %d ?????",maps->deviceType); 40 #endif 41 } 42 // free host data 43 for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) { 44 PetscCall(PetscFree(maps[grid].c_maps)); 45 PetscCall(PetscFree(maps[grid].gIdx)); 46 } 47 PetscCall(PetscFree(maps)); 48 49 PetscFunctionReturn(0); 50 } 51 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 52 { 53 PetscReal v2 = 0; 54 PetscFunctionBegin; 55 /* compute v^2 / 2 */ 56 for (int i = 0; i < dim; ++i) v2 += x[i]*x[i]; 57 /* evaluate the Maxwellian */ 58 u[0] = v2/2; 59 PetscFunctionReturn(0); 60 } 61 62 /* needs double */ 63 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 64 { 65 PetscReal *c2_0_arr = ((PetscReal*)actx); 66 double u2 = 0, c02 = (double)*c2_0_arr, xx; 67 68 PetscFunctionBegin; 69 /* compute u^2 / 2 */ 70 for (int i = 0; i < dim; ++i) u2 += x[i]*x[i]; 71 /* gamma - 1 = g_eps, for conditioning and we only take derivatives */ 72 xx = u2/c02; 73 #if defined(PETSC_USE_DEBUG) 74 u[0] = PetscSqrtReal(1. + xx); 75 #else 76 u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative 77 #endif 78 PetscFunctionReturn(0); 79 } 80 81 /* 82 LandauFormJacobian_Internal - Evaluates Jacobian matrix. 83 84 Input Parameters: 85 . globX - input vector 86 . actx - optional user-defined context 87 . dim - dimension 88 89 Output Parameters: 90 . J0acP - Jacobian matrix filled, not created 91 */ 92 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx) 93 { 94 LandauCtx *ctx = (LandauCtx*)a_ctx; 95 PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nb; 96 PetscQuadrature quad; 97 PetscReal Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2) 98 PetscScalar *cellClosure=NULL; 99 const PetscScalar *xdata=NULL; 100 PetscDS prob; 101 PetscContainer container; 102 P4estVertexMaps *maps; 103 Mat subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ]; 104 105 PetscFunctionBegin; 106 PetscValidHeaderSpecific(a_X,VEC_CLASSID,1); 107 PetscValidHeaderSpecific(JacP,MAT_CLASSID,2); 108 PetscValidPointer(ctx,5); 109 /* check for matrix container for GPU assembly. Support CPU assembly for debugging */ 110 PetscCheck(ctx->plex[0] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 111 PetscCall(PetscLogEventBegin(ctx->events[10],0,0,0,0)); 112 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 113 PetscCall(PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container)); 114 if (container) { 115 PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly"); 116 PetscCall(PetscContainerGetPointer(container, (void **) &maps)); 117 PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container"); 118 for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL; 119 } else { 120 PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly"); 121 for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) { 122 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 123 PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ])); 124 } 125 } 126 maps = NULL; 127 } 128 // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck) 129 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 130 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); Nb = Nq; 131 PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ); 132 // get metadata for collecting dynamic data 133 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 134 PetscInt cStart, cEnd; 135 PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 136 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 137 numCells[grid] = cEnd - cStart; // grids can have different topology 138 } 139 PetscCall(PetscLogEventEnd(ctx->events[10],0,0,0,0)); 140 if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */ 141 DM pack; 142 PetscCall(VecGetDM(a_X, &pack)); 143 PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM"); 144 PetscCall(PetscLogEventBegin(ctx->events[1],0,0,0,0)); 145 for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) { 146 Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */ 147 if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */ 148 } 149 if (!ctx->gpu_assembly) { 150 Vec *locXArray,*globXArray; 151 PetscScalar *cellClosure_it; 152 PetscInt cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS]; 153 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 154 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 155 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 156 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 157 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 158 } 159 /* count cellClosure size */ 160 PetscCall(DMCompositeGetNumberDM(pack,&nDMs)); 161 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid]; 162 PetscCall(PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure)); 163 cellClosure_it = cellClosure; 164 PetscCall(PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray)); 165 PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray)); 166 PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 167 PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray)); 168 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once) 169 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 170 Vec locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2; 171 PetscInt cStart, cEnd, ei; 172 PetscCall(VecDuplicate(locX,&locX2)); 173 PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2)); 174 PetscCall(DMGlobalToLocalEnd (ctx->plex[grid], globX, INSERT_VALUES, locX2)); 175 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 176 for (ei = cStart ; ei < cEnd; ++ei) { 177 PetscScalar *coef = NULL; 178 PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 179 PetscCall(PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */ 180 PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 181 cellClosure_it += Nb*Nf[grid]; 182 } 183 PetscCall(VecDestroy(&locX2)); 184 } 185 } 186 PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscCount_FMT " != cellClosure_sz = %" PetscInt_FMT,(PetscCount)(cellClosure_it-cellClosure),cellClosure_sz*ctx->batch_sz); 187 PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 188 PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray)); 189 PetscCall(PetscFree(locXArray)); 190 PetscCall(PetscFree(globXArray)); 191 xdata = NULL; 192 } else { 193 PetscMemType mtype; 194 if (ctx->jacobian_field_major_order) { // get data in batch ordering 195 PetscCall(VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 196 PetscCall(VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 197 PetscCall(VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype)); 198 } else { 199 PetscCall(VecGetArrayReadAndMemType(a_X,&xdata,&mtype)); 200 } 201 if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) { 202 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij"); 203 } 204 cellClosure = NULL; 205 } 206 PetscCall(PetscLogEventEnd(ctx->events[1],0,0,0,0)); 207 } else xdata = cellClosure = NULL; 208 209 /* do it */ 210 if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) { 211 if (ctx->deviceType == LANDAU_CUDA) { 212 #if defined(PETSC_HAVE_CUDA) 213 PetscCall(LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP)); 214 #else 215 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda"); 216 #endif 217 } else if (ctx->deviceType == LANDAU_KOKKOS) { 218 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 219 PetscCall(LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP)); 220 #else 221 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos"); 222 #endif 223 } 224 } else { /* CPU version */ 225 PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species 226 PetscInt ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS]; 227 PetscReal *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w; 228 PetscReal Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 229 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 230 PetscScalar *coo_vals=NULL; 231 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 232 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 233 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 234 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 235 } 236 /* count IPf size, etc */ 237 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 238 const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1]; 239 ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0; 240 for (PetscInt grid=0 ; grid<num_grids ; grid++) { 241 PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid]; 242 elem_offset[grid+1] = elem_offset[grid] + numCells[grid]; 243 ip_offset[grid+1] = ip_offset[grid] + numCells[grid]*Nq; 244 ipf_offset[grid+1] = ipf_offset[grid] + Nq*nfloc*numCells[grid]; 245 } 246 IPf_sz_glb = ipf_offset[num_grids]; 247 IPf_sz_tot = IPf_sz_glb*ctx->batch_sz; 248 // prep COO 249 if (ctx->coo_assembly) { 250 PetscCall(PetscMalloc1(ctx->SData_d.coo_size,&coo_vals)); // allocate every time? 251 PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",(PetscInt)ctx->SData_d.coo_size)); 252 } 253 if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */ 254 #if defined(PETSC_HAVE_THREADSAFETY) 255 double starttime, endtime; 256 starttime = MPI_Wtime(); 257 #endif 258 PetscCall(PetscLogEventBegin(ctx->events[8],0,0,0,0)); 259 for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) { 260 invMass[fieldA] = ctx->m_0/ctx->masses[fieldA]; 261 Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */ 262 if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */ 263 nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA]; 264 nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3); 265 } 266 PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz)); 267 // F df/dx 268 for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element 269 const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch 270 // find my grid: 271 PetscInt grid = 0; 272 while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid 273 { 274 const PetscInt loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid]; 275 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid]; 276 PetscScalar *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ]; 277 PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // ingJ is static data on batch 0 278 PetscInt b,f,q; 279 if (cellClosure) { 280 coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const 281 } else { 282 coef = coef_buff; 283 for (f = 0; f < loc_Nf; ++f) { 284 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0]; 285 for (b = 0; b < Nb; ++b) { 286 PetscInt idx = Idxs[b]; 287 if (idx >= 0) { 288 coef[f*Nb+b] = xdata[idx+moffset]; 289 } else { 290 idx = -idx - 1; 291 coef[f*Nb+b] = 0; 292 for (q = 0; q < maps[grid].num_face; q++) { 293 PetscInt id = maps[grid].c_maps[idx][q].gid; 294 PetscScalar scale = maps[grid].c_maps[idx][q].scale; 295 coef[f*Nb+b] += scale*xdata[id+moffset]; 296 } 297 } 298 } 299 } 300 } 301 /* get f and df */ 302 for (PetscInt qi = 0; qi < Nq; qi++) { 303 const PetscReal *invJ = &invJe[qi*dim*dim]; 304 const PetscReal *Bq = &BB[qi*Nb]; 305 const PetscReal *Dq = &DD[qi*Nb*dim]; 306 PetscReal u_x[LANDAU_DIM]; 307 /* get f & df */ 308 for (f = 0; f < loc_Nf; ++f) { 309 const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi; 310 PetscInt b, e; 311 PetscReal refSpaceDer[LANDAU_DIM]; 312 ff[idx] = 0.0; 313 for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0; 314 for (b = 0; b < Nb; ++b) { 315 const PetscInt cidx = b; 316 ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]); 317 for (int d = 0; d < dim; ++d) { 318 refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]); 319 } 320 } 321 for (int d = 0; d < LANDAU_DIM; ++d) { 322 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) { 323 u_x[d] += invJ[e*dim+d]*refSpaceDer[e]; 324 } 325 } 326 dudx[idx] = u_x[0]; 327 dudy[idx] = u_x[1]; 328 #if LANDAU_DIM==3 329 dudz[idx] = u_x[2]; 330 #endif 331 } 332 } // q 333 } // grid 334 } // grid*batch 335 PetscCall(PetscLogEventEnd(ctx->events[8],0,0,0,0)); 336 #if defined(PETSC_HAVE_THREADSAFETY) 337 endtime = MPI_Wtime(); 338 if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime); 339 #endif 340 } // Jacobian setup 341 // assemble Jacobian (or mass) 342 for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element 343 const PetscInt b_Nelem = elem_offset[num_grids]; 344 const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; 345 PetscInt grid = 0; 346 #if defined(PETSC_HAVE_THREADSAFETY) 347 double starttime, endtime; 348 starttime = MPI_Wtime(); 349 #endif 350 while (glb_elem_idx >= elem_offset[grid+1]) grid++; 351 { 352 const PetscInt loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid]; 353 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim; 354 PetscScalar *elemMat; 355 const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; 356 PetscCall(PetscMalloc1(elemMatSize, &elemMat)); 357 PetscCall(PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat))); 358 if (shift==0.0) { // Jacobian 359 PetscCall(PetscLogEventBegin(ctx->events[4],0,0,0,0)); 360 } else { // mass 361 PetscCall(PetscLogEventBegin(ctx->events[16],0,0,0,0)); 362 } 363 for (PetscInt qj = 0; qj < Nq; ++qj) { 364 const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq; 365 PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1 366 PetscInt d,d2,dp,d3,IPf_idx; 367 if (shift==0.0) { // Jacobian 368 const PetscReal * const invJj = &invJe[qj*dim*dim]; 369 PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM]; 370 const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb]; 371 // create g2 & g3 372 for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K 373 gg2_temp[d] = 0; 374 for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0; 375 } 376 /* inner beta reduction */ 377 IPf_idx = 0; 378 for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r 379 PetscInt nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r]; 380 for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) { 381 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) { 382 const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx]; 383 PetscReal temp1[3] = {0, 0, 0}, temp2 = 0; 384 #if LANDAU_DIM==2 385 PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 386 LandauTensor2D(vj, x, y, Ud, Uk, mask); 387 #else 388 PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 389 if (ctx->use_relativistic_corrections) { 390 LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0)); 391 } else { 392 LandauTensor3D(vj, x, y, z, U, mask); 393 } 394 #endif 395 for (int f = 0; f < Nfloc_r ; ++f) { 396 const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx; 397 temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 398 temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 399 #if LANDAU_DIM==3 400 temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 401 #endif 402 temp2 += ff[idx]*nu_beta[f+f_off]; 403 } 404 temp1[0] *= wi; 405 temp1[1] *= wi; 406 #if LANDAU_DIM==3 407 temp1[2] *= wi; 408 #endif 409 temp2 *= wi; 410 #if LANDAU_DIM==2 411 for (d2 = 0; d2 < 2; d2++) { 412 for (d3 = 0; d3 < 2; ++d3) { 413 /* K = U * grad(f): g2=e: i,A */ 414 gg2_temp[d2] += Uk[d2][d3]*temp1[d3]; 415 /* D = -U * (I \kron (fx)): g3=f: i,j,A */ 416 gg3_temp[d2][d3] += Ud[d2][d3]*temp2; 417 } 418 } 419 #else 420 for (d2 = 0; d2 < 3; ++d2) { 421 for (d3 = 0; d3 < 3; ++d3) { 422 /* K = U * grad(f): g2 = e: i,A */ 423 gg2_temp[d2] += U[d2][d3]*temp1[d3]; 424 /* D = -U * (I \kron (fx)): g3 = f: i,j,A */ 425 gg3_temp[d2][d3] += U[d2][d3]*temp2; 426 } 427 } 428 #endif 429 } // qi 430 } // ei_r 431 IPf_idx += nip_loc_r*Nfloc_r; 432 } /* grid_r - IPs */ 433 PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb); 434 // add alpha and put in gg2/3 435 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) { 436 for (d2 = 0; d2 < LANDAU_DIM; d2++) { 437 gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off]; 438 for (d3 = 0; d3 < LANDAU_DIM; d3++) { 439 gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off]; 440 } 441 } 442 } 443 /* add electric field term once per IP */ 444 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) { 445 gg2[fieldA][LANDAU_DIM-1] += Eq_m[fieldA+f_off]; 446 } 447 /* Jacobian transform - g2, g3 */ 448 for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) { 449 for (d = 0; d < dim; ++d) { 450 g2[fieldA][d] = 0.0; 451 for (d2 = 0; d2 < dim; ++d2) { 452 g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2]; 453 g3[fieldA][d][d2] = 0.0; 454 for (d3 = 0; d3 < dim; ++d3) { 455 for (dp = 0; dp < dim; ++dp) { 456 g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp]; 457 } 458 } 459 g3[fieldA][d][d2] *= wj; 460 } 461 g2[fieldA][d] *= wj; 462 } 463 } 464 } else { // mass 465 PetscReal wj = ww[jpidx_glb]; 466 /* Jacobian transform - g0 */ 467 for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) { 468 if (dim==2) { 469 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0 470 } else { 471 g0[fieldA] = wj * shift; // move this to below and remove g0 472 } 473 } 474 } 475 /* FE matrix construction */ 476 { 477 PetscInt fieldA,d,f,d2,g; 478 const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim]; 479 /* assemble - on the diagonal (I,I) */ 480 for (fieldA = 0; fieldA < loc_Nf ; fieldA++) { 481 for (f = 0; f < Nb ; f++) { 482 const PetscInt i = fieldA*Nb + f; /* Element matrix row */ 483 for (g = 0; g < Nb; ++g) { 484 const PetscInt j = fieldA*Nb + g; /* Element matrix column */ 485 const PetscInt fOff = i*totDim + j; 486 if (shift==0.0) { 487 for (d = 0; d < dim; ++d) { 488 elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g]; 489 for (d2 = 0; d2 < dim; ++d2) { 490 elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2]; 491 } 492 } 493 } else { // mass 494 elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g]; 495 } 496 } 497 } 498 } 499 } 500 } /* qj loop */ 501 if (shift==0.0) { // Jacobian 502 PetscCall(PetscLogEventEnd(ctx->events[4],0,0,0,0)); 503 } else { 504 PetscCall(PetscLogEventEnd(ctx->events[16],0,0,0,0)); 505 } 506 #if defined(PETSC_HAVE_THREADSAFETY) 507 endtime = MPI_Wtime(); 508 if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime); 509 #endif 510 /* assemble matrix */ 511 if (!container) { 512 PetscInt cStart; 513 PetscCall(PetscLogEventBegin(ctx->events[6],0,0,0,0)); 514 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL)); 515 PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES)); 516 PetscCall(PetscLogEventEnd(ctx->events[6],0,0,0,0)); 517 } else { // GPU like assembly for debugging 518 PetscInt fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; 519 PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0}; 520 LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets; 521 /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */ 522 for (fieldA = 0; fieldA < loc_Nf ; fieldA++) { 523 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0]; 524 for (f = 0; f < Nb ; f++) { 525 PetscInt idx = Idxs[f]; 526 if (idx >= 0) { 527 nr = 1; 528 rows0[0] = idx; 529 row_scale[0] = 1.; 530 } else { 531 idx = -idx - 1; 532 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) { 533 if (maps[grid].c_maps[idx][q].gid < 0) break; 534 rows0[q] = maps[grid].c_maps[idx][q].gid; 535 row_scale[q] = maps[grid].c_maps[idx][q].scale; 536 } 537 } 538 for (g = 0; g < Nb; ++g) { 539 idx = Idxs[g]; 540 if (idx >= 0) { 541 nc = 1; 542 cols0[0] = idx; 543 col_scale[0] = 1.; 544 } else { 545 idx = -idx - 1; 546 nc = maps[grid].num_face; 547 for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) { 548 if (maps[grid].c_maps[idx][q].gid < 0) break; 549 cols0[q] = maps[grid].c_maps[idx][q].gid; 550 col_scale[q] = maps[grid].c_maps[idx][q].scale; 551 } 552 } 553 const PetscInt i = fieldA*Nb + f; /* Element matrix row */ 554 const PetscInt j = fieldA*Nb + g; /* Element matrix column */ 555 const PetscScalar Aij = elemMat[i*totDim + j]; 556 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData 557 const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb; 558 const int idx0 = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 559 for (int q = 0, idx2 = idx0; q < nr; q++) { 560 for (int d = 0; d < nc; d++, idx2++) { 561 coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij; 562 } 563 } 564 } else { 565 for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset; 566 for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset; 567 for (q = 0; q < nr; q++) { 568 for (d = 0; d < nc; d++) { 569 vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij; 570 } 571 } 572 PetscCall(MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES)); 573 } 574 } 575 } 576 } 577 } 578 if (loc_elem==-1) { 579 PetscCall(PetscPrintf(ctx->comm,"CPU Element matrix\n")); 580 for (int d = 0; d < totDim; ++d) { 581 for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm," %12.5e", (double)PetscRealPart(elemMat[d*totDim + f]))); 582 PetscCall(PetscPrintf(ctx->comm,"\n")); 583 } 584 exit(12); 585 } 586 PetscCall(PetscFree(elemMat)); 587 } /* grid */ 588 } /* outer element & batch loop */ 589 if (shift==0.0) { // mass 590 PetscCall(PetscFree4(ff, dudx, dudy, dudz)); 591 } 592 if (!container) { // 'CPU' assembly move nest matrix to global JacP 593 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP 594 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 595 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid]; 596 PetscInt nloc, nzl, colbuf[1024], row; 597 const PetscInt *cols; 598 const PetscScalar *vals; 599 Mat B = subJ[ LAND_PACK_IDX(b_id,grid) ]; 600 PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY)); 601 PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY)); 602 PetscCall(MatGetSize(B, &nloc, NULL)); 603 for (int i=0 ; i<nloc ; i++) { 604 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 605 PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 606 for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j]; 607 row = moffset + i; 608 PetscCall(MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES)); 609 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 610 } 611 PetscCall(MatDestroy(&B)); 612 } 613 } 614 } 615 if (coo_vals) { 616 PetscCall(MatSetValuesCOO(JacP,coo_vals,ADD_VALUES)); 617 PetscCall(PetscFree(coo_vals)); 618 } 619 } /* CPU version */ 620 PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY)); 621 PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY)); 622 /* clean up */ 623 if (cellClosure) PetscCall(PetscFree(cellClosure)); 624 if (xdata) { 625 PetscCall(VecRestoreArrayReadAndMemType(a_X,&xdata)); 626 } 627 PetscFunctionReturn(0); 628 } 629 630 #if defined(LANDAU_ADD_BCS) 631 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux, 632 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 633 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 634 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[]) 635 { 636 uexact[0] = 0; 637 } 638 #endif 639 640 #define MATVEC2(__a,__x,__p) {int i,j; for (i=0.; i<2; i++) {__p[i] = 0; for (j=0.; j<2; j++) __p[i] += __a[i][j]*__x[j]; }} 641 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y, 642 PetscReal *outX, PetscReal *outY) 643 { 644 PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact; 645 if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) { 646 *outX = x; *outY = y; 647 } else { 648 const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr; 649 PetscReal cth,sth,xyprime[2],Rth[2][2],rotcos,newrr; 650 if (num_sections==2) { 651 rotcos = 0.70710678118654; 652 outfact = 1.5; efact = 2.5; 653 /* rotate normalized vector into [-pi/4,pi/4) */ 654 if (sinphi >= 0.) { /* top cell, -pi/2 */ 655 cth = 0.707106781186548; sth = -0.707106781186548; 656 } else { /* bottom cell -pi/8 */ 657 cth = 0.707106781186548; sth = .707106781186548; 658 } 659 } else if (num_sections==3) { 660 rotcos = 0.86602540378443; 661 outfact = 1.5; efact = 2.5; 662 /* rotate normalized vector into [-pi/6,pi/6) */ 663 if (sinphi >= 0.5) { /* top cell, -pi/3 */ 664 cth = 0.5; sth = -0.866025403784439; 665 } else if (sinphi >= -.5) { /* mid cell 0 */ 666 cth = 1.; sth = .0; 667 } else { /* bottom cell +pi/3 */ 668 cth = 0.5; sth = 0.866025403784439; 669 } 670 } else if (num_sections==4) { 671 rotcos = 0.9238795325112; 672 outfact = 1.5; efact = 3; 673 /* rotate normalized vector into [-pi/8,pi/8) */ 674 if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */ 675 cth = 0.38268343236509; sth = -0.923879532511287; 676 } else if (sinphi >= 0.) { /* mid top cell -pi/8 */ 677 cth = 0.923879532511287; sth = -.38268343236509; 678 } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */ 679 cth = 0.923879532511287; sth = 0.38268343236509; 680 } else { /* bottom cell + 3pi/8 */ 681 cth = 0.38268343236509; sth = .923879532511287; 682 } 683 } else { 684 cth = 0.; sth = 0.; rotcos = 0; efact = 0; 685 } 686 Rth[0][0] = cth; Rth[0][1] =-sth; 687 Rth[1][0] = sth; Rth[1][1] = cth; 688 MATVEC2(Rth,xy,xyprime); 689 if (num_sections==2) { 690 newrr = xyprime[0]/rotcos; 691 } else { 692 PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin; 693 PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax; 694 newrr = rin + routfrac*nroutmax; 695 } 696 *outX = cosphi*newrr; *outY = sinphi*newrr; 697 /* grade */ 698 PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY)); 699 if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */ 700 else { rs = r1; re = r2; fact = efact;} /* electron zone */ 701 tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr; 702 *outX *= tt; 703 *outY *= tt; 704 } 705 } 706 707 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx) 708 { 709 LandauCtx *ctx = (LandauCtx*)a_ctx; 710 PetscReal r = abc[0], z = abc[1]; 711 if (ctx->inflate) { 712 PetscReal absR, absZ; 713 absR = PetscAbs(r); 714 absZ = PetscAbs(z); 715 CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on? 716 r = (r > 0) ? absR : -absR; 717 z = (z > 0) ? absZ : -absZ; 718 } 719 xyz[0] = r; 720 xyz[1] = z; 721 if (dim==3) xyz[2] = abc[2]; 722 723 PetscFunctionReturn(0); 724 } 725 726 /* create DMComposite of meshes for each species group */ 727 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack) 728 { 729 PetscFunctionBegin; 730 { /* p4est, quads */ 731 /* Create plex mesh of Landau domain */ 732 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 733 PetscReal radius = ctx->radius[grid]; 734 if (!ctx->sphere) { 735 PetscInt cells[] = {2,2,2}; 736 PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius}; 737 DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE}; 738 if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; } 739 PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here 740 PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */ 741 if (dim==3) PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "cube")); 742 else PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane")); 743 } else if (dim==2) { // sphere is all wrong. should just have one inner radius 744 PetscInt numCells,cells[16][4],i,j; 745 PetscInt numVerts; 746 PetscReal inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius; 747 PetscReal *flatCoords = NULL; 748 PetscInt *flatCells = NULL, *pcell; 749 if (ctx->num_sections==2) { 750 #if 1 751 numCells = 5; 752 numVerts = 10; 753 int cells2[][4] = { {0,1,4,3}, 754 {1,2,5,4}, 755 {3,4,7,6}, 756 {4,5,8,7}, 757 {6,7,8,9} }; 758 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 759 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 760 { 761 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 762 for (j = 0; j < numVerts-1; j++) { 763 PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2; 764 PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid]; 765 z = rad * PetscSinReal(theta); 766 coords[j][1] = z; 767 r = rad * PetscCosReal(theta); 768 coords[j][0] = r; 769 } 770 coords[numVerts-1][0] = coords[numVerts-1][1] = 0; 771 } 772 #else 773 numCells = 4; 774 numVerts = 8; 775 static int cells2[][4] = {{0,1,2,3}, 776 {4,5,1,0}, 777 {5,6,2,1}, 778 {6,7,3,2}}; 779 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 780 PetscCall(loc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 781 { 782 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 783 PetscInt j; 784 for (j = 0; j < 8; j++) { 785 PetscReal z, r; 786 PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.; 787 PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0); 788 z = rad * PetscSinReal(theta); 789 coords[j][1] = z; 790 r = rad * PetscCosReal(theta); 791 coords[j][0] = r; 792 } 793 } 794 #endif 795 } else if (ctx->num_sections==3) { 796 numCells = 7; 797 numVerts = 12; 798 int cells2[][4] = { {0,1,5,4}, 799 {1,2,6,5}, 800 {2,3,7,6}, 801 {4,5,9,8}, 802 {5,6,10,9}, 803 {6,7,11,10}, 804 {8,9,10,11} }; 805 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 806 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 807 { 808 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 809 for (j = 0; j < numVerts; j++) { 810 PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3; 811 PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid]; 812 z = rad * PetscSinReal(theta); 813 coords[j][1] = z; 814 r = rad * PetscCosReal(theta); 815 coords[j][0] = r; 816 } 817 } 818 } else if (ctx->num_sections==4) { 819 numCells = 10; 820 numVerts = 16; 821 int cells2[][4] = { {0,1,6,5}, 822 {1,2,7,6}, 823 {2,3,8,7}, 824 {3,4,9,8}, 825 {5,6,11,10}, 826 {6,7,12,11}, 827 {7,8,13,12}, 828 {8,9,14,13}, 829 {10,11,12,15}, 830 {12,13,14,15}}; 831 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 832 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 833 { 834 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 835 for (j = 0; j < numVerts-1; j++) { 836 PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4; 837 PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid]; 838 z = rad * PetscSinReal(theta); 839 coords[j][1] = z; 840 r = rad * PetscCosReal(theta); 841 coords[j][0] = r; 842 } 843 coords[numVerts-1][0] = coords[numVerts-1][1] = 0; 844 } 845 } else { 846 numCells = 0; 847 numVerts = 0; 848 } 849 for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) { 850 pcell[0] = cells[j][0]; pcell[1] = cells[j][1]; 851 pcell[2] = cells[j][2]; pcell[3] = cells[j][3]; 852 } 853 PetscCall(DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid])); 854 PetscCall(PetscFree2(flatCoords,flatCells)); 855 PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle")); 856 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes does not support cubed sphere"); 857 858 PetscCall(DMSetFromOptions(ctx->plex[grid])); 859 } // grid loop 860 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack,prefix)); 861 PetscCall(DMSetFromOptions(pack)); 862 863 { /* convert to p4est (or whatever), wait for discretization to create pack */ 864 char convType[256]; 865 PetscBool flg; 866 867 PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX"); 868 PetscCall(PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg)); 869 PetscOptionsEnd(); 870 if (flg) { 871 ctx->use_p4est = PETSC_TRUE; /* flag for Forest */ 872 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 873 DM dmforest; 874 PetscCall(DMConvert(ctx->plex[grid],convType,&dmforest)); 875 if (dmforest) { 876 PetscBool isForest; 877 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix)); 878 PetscCall(DMIsForest(dmforest,&isForest)); 879 if (isForest) { 880 if (ctx->sphere && ctx->inflate) { 881 PetscCall(DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx)); 882 } 883 PetscCall(DMDestroy(&ctx->plex[grid])); 884 ctx->plex[grid] = dmforest; // Forest for adaptivity 885 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?"); 886 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?"); 887 } 888 } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */ 889 } 890 } /* non-file */ 891 PetscCall(DMSetDimension(pack, dim)); 892 PetscCall(PetscObjectSetName((PetscObject) pack, "Mesh")); 893 PetscCall(DMSetApplicationContext(pack, ctx)); 894 895 PetscFunctionReturn(0); 896 } 897 898 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx) 899 { 900 PetscInt ii,i0; 901 char buf[256]; 902 PetscSection section; 903 904 PetscFunctionBegin; 905 for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 906 if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e")); 907 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii)); 908 /* Setup Discretization - FEM */ 909 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii])); 910 PetscCall(PetscObjectSetName((PetscObject) ctx->fe[ii], buf)); 911 PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii])); 912 } 913 PetscCall(DMCreateDS(ctx->plex[grid])); 914 PetscCall(DMGetSection(ctx->plex[grid], §ion)); 915 for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 916 if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se")); 917 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii)); 918 PetscCall(PetscSectionSetComponentName(section, i0, 0, buf)); 919 } 920 PetscFunctionReturn(0); 921 } 922 923 /* Define a Maxwellian function for testing out the operator. */ 924 925 /* Using cartesian velocity space coordinates, the particle */ 926 /* density, [1/m^3], is defined according to */ 927 928 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */ 929 930 /* Using some constant, c, we normalize the velocity vector into a */ 931 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */ 932 933 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */ 934 935 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */ 936 /* for finding the particle within the interval in a box dx^3 around x is */ 937 938 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */ 939 940 typedef struct { 941 PetscReal v_0; 942 PetscReal kT_m; 943 PetscReal n; 944 PetscReal shift; 945 } MaxwellianCtx; 946 947 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 948 { 949 MaxwellianCtx *mctx = (MaxwellianCtx*)actx; 950 PetscInt i; 951 PetscReal v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */ 952 PetscFunctionBegin; 953 /* compute the exponents, v^2 */ 954 for (i = 0; i < dim; ++i) v2 += x[i]*x[i]; 955 /* evaluate the Maxwellian */ 956 u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta)); 957 if (mctx->shift!=0.) { 958 v2 = 0; 959 for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i]; 960 v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift); 961 /* evaluate the shifted Maxwellian */ 962 u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta)); 963 } 964 PetscFunctionReturn(0); 965 } 966 967 /*@ 968 DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state 969 970 Collective on X 971 972 Input Parameters: 973 . dm - The mesh (local) 974 + time - Current time 975 - temps - Temperatures of each species (global) 976 . ns - Number density of each species (global) 977 - grid - index into current grid - just used for offset into temp and ns 978 + actx - Landau context 979 980 Output Parameter: 981 . X - The state (local to this grid) 982 983 Level: beginner 984 985 .keywords: mesh 986 .seealso: `DMPlexLandauCreateVelocitySpace()` 987 @*/ 988 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx) 989 { 990 LandauCtx *ctx = (LandauCtx*)actx; 991 PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *); 992 PetscInt dim; 993 MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES]; 994 995 PetscFunctionBegin; 996 PetscCall(DMGetDimension(dm, &dim)); 997 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 998 for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 999 mctxs[i0] = &data[i0]; 1000 data[i0].v_0 = ctx->v_0; // v_0 same for all grids 1001 data[i0].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */ 1002 data[i0].n = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] use for Conner-Hastie 1003 initu[i0] = maxwellian; 1004 data[i0].shift = 0; 1005 } 1006 data[0].shift = ctx->electronShift; 1007 /* need to make ADD_ALL_VALUES work - TODO */ 1008 PetscCall(DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X)); 1009 PetscFunctionReturn(0); 1010 } 1011 1012 /* 1013 LandauSetInitialCondition - Addes Maxwellians with context 1014 1015 Collective on X 1016 1017 Input Parameters: 1018 . dm - The mesh 1019 - grid - index into current grid - just used for offset into temp and ns 1020 + actx - Landau context with T and n 1021 1022 Output Parameter: 1023 . X - The state 1024 1025 Level: beginner 1026 1027 .keywords: mesh 1028 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()` 1029 */ 1030 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx) 1031 { 1032 LandauCtx *ctx = (LandauCtx*)actx; 1033 PetscFunctionBegin; 1034 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 1035 PetscCall(VecZeroEntries(X)); 1036 PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx)); 1037 PetscFunctionReturn(0); 1038 } 1039 1040 // adapt a level once. Forest in/out 1041 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest) 1042 { 1043 DM forest, plex, adaptedDM = NULL; 1044 PetscDS prob; 1045 PetscBool isForest; 1046 PetscQuadrature quad; 1047 PetscInt Nq, *Nb, cStart, cEnd, c, dim, qj, k; 1048 DMLabel adaptLabel = NULL; 1049 1050 PetscFunctionBegin; 1051 forest = ctx->plex[grid]; 1052 PetscCall(DMCreateDS(forest)); 1053 PetscCall(DMGetDS(forest, &prob)); 1054 PetscCall(DMGetDimension(forest, &dim)); 1055 PetscCall(DMIsForest(forest, &isForest)); 1056 PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"! Forest"); 1057 PetscCall(DMConvert(forest, DMPLEX, &plex)); 1058 PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd)); 1059 PetscCall(DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel)); 1060 PetscCall(PetscFEGetQuadrature(fem, &quad)); 1061 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); 1062 PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ); 1063 PetscCall(PetscDSGetDimensions(prob, &Nb)); 1064 if (type==4) { 1065 for (c = cStart; c < cEnd; c++) { 1066 PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 1067 } 1068 PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM")); 1069 } else if (type==2) { 1070 PetscInt rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2; 1071 PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY; 1072 for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; } 1073 for (c = cStart; c < cEnd; c++) { 1074 PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ]; 1075 PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ)); 1076 for (qj = 0; qj < Nq; ++qj) { 1077 tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0)); 1078 r = PetscSqrtReal(tt); 1079 if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) { 1080 minRad = r; 1081 nr = 0; 1082 rCellIdx[nr++]= c; 1083 PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", (double)r, c, qj+1, Nq)); 1084 } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) { 1085 for (k=0;k<nr;k++) if (c == rCellIdx[k]) break; 1086 if (k==nr) { 1087 rCellIdx[nr++]= c; 1088 PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", (double)r, c, qj+1, Nq, (double)(r-minRad))); 1089 } 1090 } 1091 if (ctx->sphere) { 1092 if ((tt=r-ctx->e_radius) > 0) { 1093 PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,(double)tt)); 1094 if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) { 1095 eMinRad = tt; 1096 eMaxIdx = 0; 1097 eCellIdx[eMaxIdx++] = c; 1098 } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) { 1099 eCellIdx[eMaxIdx++] = c; 1100 } 1101 } 1102 if ((tt=r-ctx->i_radius[grid]) > 0) { 1103 if (tt < iMinRad - 1.e-5) { 1104 iMinRad = tt; 1105 iMaxIdx = 0; 1106 iCellIdx[iMaxIdx++] = c; 1107 } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) { 1108 iCellIdx[iMaxIdx++] = c; 1109 } 1110 } 1111 } 1112 } 1113 } 1114 for (k=0;k<nr;k++) { 1115 PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE)); 1116 } 1117 if (ctx->sphere) { 1118 for (c = 0; c < eMaxIdx; c++) { 1119 PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE)); 1120 PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],(double)eMinRad)); 1121 } 1122 for (c = 0; c < iMaxIdx; c++) { 1123 PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE)); 1124 PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],(double)iMinRad)); 1125 } 1126 } 1127 PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],(double)minRad)); 1128 } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */ 1129 PetscScalar *coef = NULL; 1130 Vec coords; 1131 PetscInt csize,Nv,d,nz; 1132 DM cdm; 1133 PetscSection cs; 1134 PetscCall(DMGetCoordinatesLocal(forest, &coords)); 1135 PetscCall(DMGetCoordinateDM(forest, &cdm)); 1136 PetscCall(DMGetLocalSection(cdm, &cs)); 1137 for (c = cStart; c < cEnd; c++) { 1138 PetscInt doit = 0, outside = 0; 1139 PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef)); 1140 Nv = csize/dim; 1141 for (nz = d = 0; d < Nv; d++) { 1142 PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0); 1143 x = PetscSqrtReal(x); 1144 if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1; /* refine origin */ 1145 else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */ 1146 else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */ 1147 else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */ 1148 if (x < PETSC_MACHINE_EPSILON*10.) nz++; 1149 } 1150 PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef)); 1151 if (doit || (outside<Nv && nz)) { 1152 PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 1153 } 1154 } 1155 PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM")); 1156 } 1157 PetscCall(DMDestroy(&plex)); 1158 PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM)); 1159 PetscCall(DMLabelDestroy(&adaptLabel)); 1160 *newForest = adaptedDM; 1161 if (adaptedDM) { 1162 if (isForest) { 1163 PetscCall(DMForestSetAdaptivityForest(adaptedDM,NULL)); // ???? 1164 } else exit(33); // ??????? 1165 PetscCall(DMConvert(adaptedDM, DMPLEX, &plex)); 1166 PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd)); 1167 PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart))); 1168 PetscCall(DMDestroy(&plex)); 1169 } else *newForest = NULL; 1170 PetscFunctionReturn(0); 1171 } 1172 1173 // forest goes in (ctx->plex[grid]), plex comes out 1174 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu) 1175 { 1176 PetscInt adaptIter; 1177 1178 PetscFunctionBegin; 1179 PetscInt type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]}; 1180 for (type=0;type<5;type++) { 1181 for (adaptIter = 0; adaptIter<limits[type];adaptIter++) { 1182 DM newForest = NULL; 1183 PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest)); 1184 if (newForest) { 1185 PetscCall(DMDestroy(&ctx->plex[grid])); 1186 PetscCall(VecDestroy(uu)); 1187 PetscCall(DMCreateGlobalVector(newForest,uu)); 1188 PetscCall(PetscObjectSetName((PetscObject) *uu, "uAMR")); 1189 PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, ctx)); 1190 ctx->plex[grid] = newForest; 1191 } else { 1192 exit(4); // can happen with no AMR and post refinement 1193 } 1194 } 1195 } 1196 PetscFunctionReturn(0); 1197 } 1198 1199 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[]) 1200 { 1201 PetscBool flg, sph_flg; 1202 PetscInt ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS]; 1203 PetscReal v0_grid[LANDAU_MAX_GRIDS]; 1204 DM dummy; 1205 1206 PetscFunctionBegin; 1207 PetscCall(DMCreate(ctx->comm,&dummy)); 1208 /* get options - initialize context */ 1209 ctx->verbose = 1; // should be 0 for silent compliance 1210 #if defined(PETSC_HAVE_THREADSAFETY) 1211 ctx->batch_sz = PetscNumOMPThreads; 1212 #else 1213 ctx->batch_sz = 1; 1214 #endif 1215 ctx->batch_view_idx = 0; 1216 ctx->interpolate = PETSC_TRUE; 1217 ctx->gpu_assembly = PETSC_TRUE; 1218 ctx->norm_state = 0; 1219 ctx->electronShift = 0; 1220 ctx->M = NULL; 1221 ctx->J = NULL; 1222 /* geometry and grids */ 1223 ctx->sphere = PETSC_FALSE; 1224 ctx->inflate = PETSC_FALSE; 1225 ctx->use_p4est = PETSC_FALSE; 1226 ctx->num_sections = 3; /* 2, 3 or 4 */ 1227 for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) { 1228 ctx->radius[grid] = 5.; /* thermal radius (velocity) */ 1229 ctx->numAMRRefine[grid] = 5; 1230 ctx->postAMRRefine[grid] = 0; 1231 ctx->species_offset[grid+1] = 1; // one species default 1232 num_species_grid[grid] = 0; 1233 ctx->plex[grid] = NULL; /* cache as expensive to Convert */ 1234 } 1235 ctx->species_offset[0] = 0; 1236 ctx->re_radius = 0.; 1237 ctx->vperp0_radius1 = 0; 1238 ctx->vperp0_radius2 = 0; 1239 ctx->nZRefine1 = 0; 1240 ctx->nZRefine2 = 0; 1241 ctx->numRERefine = 0; 1242 num_species_grid[0] = 1; // one species default 1243 /* species - [0] electrons, [1] one ion species eg, duetarium, [2] heavy impurity ion, ... */ 1244 ctx->charges[0] = -1; /* electron charge (MKS) */ 1245 ctx->masses[0] = 1/1835.469965278441013; /* temporary value in proton mass */ 1246 ctx->n[0] = 1; 1247 ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */ 1248 ctx->thermal_temps[0] = 1; 1249 /* constants, etc. */ 1250 ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */ 1251 ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */ 1252 ctx->lnLam = 10; /* cross section ratio large - small angle collisions */ 1253 ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */ 1254 ctx->Ez = 0; 1255 for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0; 1256 ctx->use_matrix_mass = PETSC_FALSE; 1257 ctx->use_relativistic_corrections = PETSC_FALSE; 1258 ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */ 1259 ctx->SData_d.w = NULL; 1260 ctx->SData_d.x = NULL; 1261 ctx->SData_d.y = NULL; 1262 ctx->SData_d.z = NULL; 1263 ctx->SData_d.invJ = NULL; 1264 ctx->jacobian_field_major_order = PETSC_FALSE; 1265 ctx->SData_d.coo_elem_offsets = NULL; 1266 ctx->SData_d.coo_elem_point_offsets = NULL; 1267 ctx->coo_assembly = PETSC_FALSE; 1268 ctx->SData_d.coo_elem_fullNb = NULL; 1269 ctx->SData_d.coo_size = 0; 1270 PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none"); 1271 { 1272 char opstring[256]; 1273 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1274 ctx->deviceType = LANDAU_KOKKOS; 1275 PetscCall(PetscStrcpy(opstring,"kokkos")); 1276 #elif defined(PETSC_HAVE_CUDA) 1277 ctx->deviceType = LANDAU_CUDA; 1278 PetscCall(PetscStrcpy(opstring,"cuda")); 1279 #else 1280 ctx->deviceType = LANDAU_CPU; 1281 PetscCall(PetscStrcpy(opstring,"cpu")); 1282 #endif 1283 PetscCall(PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL)); 1284 PetscCall(PetscStrcmp("cpu",opstring,&flg)); 1285 if (flg) { 1286 ctx->deviceType = LANDAU_CPU; 1287 } else { 1288 PetscCall(PetscStrcmp("cuda",opstring,&flg)); 1289 if (flg) { 1290 ctx->deviceType = LANDAU_CUDA; 1291 } else { 1292 PetscCall(PetscStrcmp("kokkos",opstring,&flg)); 1293 if (flg) ctx->deviceType = LANDAU_KOKKOS; 1294 else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring); 1295 } 1296 } 1297 } 1298 PetscCall(PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL)); 1299 PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL)); 1300 PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL)); 1301 PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,(PetscInt)LANDAU_MAX_BATCH_SZ,ctx->batch_sz); 1302 PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL)); 1303 PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz); 1304 PetscCall(PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in unites of Conner-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL)); 1305 PetscCall(PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL)); 1306 PetscCall(PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL)); 1307 PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL)); 1308 PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL)); 1309 PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL)); 1310 1311 /* get num species with temperature, set defaults */ 1312 for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) { 1313 ctx->thermal_temps[ii] = 1; 1314 ctx->charges[ii] = 1; 1315 ctx->masses[ii] = 1; 1316 ctx->n[ii] = 1; 1317 } 1318 nt = LANDAU_MAX_SPECIES; 1319 PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg)); 1320 if (flg) { 1321 PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt)); 1322 ctx->num_species = nt; 1323 } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps ,t1,t2,.. must be provided to set the number of species"); 1324 for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */ 1325 nm = LANDAU_MAX_SPECIES-1; 1326 PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg)); 1327 if (flg && nm != ctx->num_species-1) { 1328 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species-1); 1329 } 1330 nm = LANDAU_MAX_SPECIES; 1331 PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg)); 1332 PetscCheck(!flg || nm == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT,nm,ctx->num_species); 1333 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */ 1334 ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */ 1335 ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */ 1336 nc = LANDAU_MAX_SPECIES-1; 1337 PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg)); 1338 if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1); 1339 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */ 1340 /* geometry and grids */ 1341 nt = LANDAU_MAX_GRIDS; 1342 PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg)); 1343 if (flg) { 1344 ctx->num_grids = nt; 1345 for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii]; 1346 PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %d)",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS); 1347 } else { 1348 ctx->num_grids = 1; // go back to a single grid run 1349 num_species_grid[0] = ctx->num_species; 1350 } 1351 for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii]; 1352 PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species); 1353 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1354 int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid 1355 v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */ 1356 } 1357 ii = 0; 1358 PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL)); 1359 ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */ 1360 ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */ 1361 /* domain */ 1362 nt = LANDAU_MAX_GRIDS; 1363 PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg)); 1364 if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids); 1365 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1366 if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */ 1367 if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75; 1368 else ctx->radius[grid] = -ctx->radius[grid]; 1369 ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid) 1370 PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",(double)ctx->radius[grid],grid)); 1371 } 1372 ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0 1373 } 1374 /* amr parametres */ 1375 nt = LANDAU_MAX_GRIDS; 1376 PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg)); 1377 PetscCheck(!flg || nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids); 1378 nt = LANDAU_MAX_GRIDS; 1379 PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg)); 1380 for (ii=1;ii<ctx->num_grids;ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now 1381 PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg)); 1382 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg)); 1383 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg)); 1384 PetscCall(PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg)); 1385 PetscCall(PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg)); 1386 PetscCall(PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg)); 1387 /* spherical domain (not used) */ 1388 PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential section in (2D) grid, 2, 3, of 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL)); 1389 PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg)); 1390 PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg)); 1391 PetscCall(PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg)); 1392 if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an e radius but did not set sphere, user error really */ 1393 if (!flg) { 1394 ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0; 1395 } 1396 nt = LANDAU_MAX_GRIDS; 1397 PetscCall(PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg)); 1398 if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; 1399 if (!flg) { 1400 ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain 1401 } 1402 if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_species = %" PetscInt_FMT,nt,ctx->num_grids); 1403 if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",(double)ctx->i_radius[0],(double)ctx->e_radius,(double)ctx->radius[0]); 1404 /* processing options */ 1405 PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL)); 1406 if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // make Kokkos 1407 PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL)); 1408 if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d",ctx->coo_assembly); 1409 } 1410 PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL)); 1411 if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly"); 1412 PetscCheck(!ctx->jacobian_field_major_order,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order DEPRECATED"); 1413 PetscOptionsEnd(); 1414 1415 for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0; 1416 if (ctx->verbose > 0) { 1417 PetscCall(PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",(double)ctx->masses[0],(double)(ctx->masses[1]/1.6720e-27),(double)(ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0))); 1418 PetscCall(PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0],(double)(-ctx->charges[1]/ctx->charges[0]),(double)(ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0))); 1419 PetscCall(PetscPrintf(ctx->comm, "n: e: %10.3e i: %10.3e %10.3e\n", (double)ctx->n[0],(double)ctx->n[1],(double)(ctx->num_species>2 ? ctx->n[2] : 0))); 1420 PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", (double)ctx->thermal_temps[0], (double)ctx->thermal_temps[1], (double)((ctx->num_species>2) ? ctx->thermal_temps[2] : 0), (double)ctx->v_0, (double)(ctx->v_0/SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz)); 1421 PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %d: %10.3e (%" PetscInt_FMT ") ",0,(double)ctx->radius[0],ctx->numAMRRefine[0])); 1422 for (ii=1;ii<ctx->num_grids;ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,(double)ctx->radius[ii],ctx->numAMRRefine[ii])); 1423 PetscCall(PetscPrintf(ctx->comm,"\n")); 1424 if (ctx->jacobian_field_major_order) { 1425 PetscCall(PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n")); 1426 } else { 1427 PetscCall(PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n")); 1428 } 1429 } 1430 PetscCall(DMDestroy(&dummy)); 1431 { 1432 PetscMPIInt rank; 1433 PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank)); 1434 ctx->stage = 0; 1435 PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */ 1436 PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */ 1437 PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */ 1438 PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */ 1439 PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */ 1440 PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */ 1441 PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */ 1442 PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */ 1443 PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */ 1444 PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */ 1445 PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */ 1446 PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */ 1447 PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */ 1448 PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */ 1449 PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */ 1450 PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */ 1451 PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */ 1452 1453 if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */ 1454 PetscCall(PetscOptionsClearValue(NULL,"-snes_converged_reason")); 1455 PetscCall(PetscOptionsClearValue(NULL,"-ksp_converged_reason")); 1456 PetscCall(PetscOptionsClearValue(NULL,"-snes_monitor")); 1457 PetscCall(PetscOptionsClearValue(NULL,"-ksp_monitor")); 1458 PetscCall(PetscOptionsClearValue(NULL,"-ts_monitor")); 1459 PetscCall(PetscOptionsClearValue(NULL,"-ts_view")); 1460 PetscCall(PetscOptionsClearValue(NULL,"-ts_adapt_monitor")); 1461 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view")); 1462 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view")); 1463 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view")); 1464 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_view")); 1465 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view")); 1466 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mat_view")); 1467 PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason")); 1468 PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor")); 1469 PetscCall(PetscOptionsClearValue(NULL,"-")); 1470 PetscCall(PetscOptionsClearValue(NULL,"-info")); 1471 } 1472 } 1473 PetscFunctionReturn(0); 1474 } 1475 1476 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx) 1477 { 1478 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 1479 PetscQuadrature quad; 1480 const PetscReal *quadWeights; 1481 PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0, MAP_BF_SIZE = 64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES; 1482 PetscTabulation *Tf; 1483 PetscDS prob; 1484 1485 PetscFunctionBegin; 1486 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 1487 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 1488 /* DS, Tab and quad is same on all grids */ 1489 PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 1490 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 1491 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights)); 1492 PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%d)",Nq,LANDAU_MAX_NQ); 1493 /* setup each grid */ 1494 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 1495 PetscInt cStart, cEnd; 1496 PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 1497 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1498 numCells[grid] = cEnd - cStart; // grids can have different topology 1499 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 1500 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 1501 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 1502 ncellsTot += numCells[grid]; 1503 } 1504 /* create GPU assembly data */ 1505 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1506 PetscContainer container; 1507 PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat; 1508 pointInterpolationP4est (*pointMaps)[LANDAU_MAX_Q_FACE]; 1509 P4estVertexMaps *maps; 1510 const PetscInt *plex_batch=NULL,Nb=Nq; // tensor elements; 1511 LandauIdx *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL; 1512 /* create GPU asssembly data */ 1513 PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1)); 1514 PetscCall(PetscLogEventBegin(ctx->events[2],0,0,0,0)); 1515 PetscCall(PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps)); 1516 PetscCall(PetscMalloc(sizeof(*pointMaps)*MAP_BF_SIZE, &pointMaps)); 1517 1518 if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d 1519 PetscCall(PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets)); // array of integer pointers 1520 coo_elem_offsets[0] = 0; // finish later 1521 PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot)); 1522 ctx->SData_d.coo_n_cellsTot = ncellsTot; 1523 ctx->SData_d.coo_elem_offsets = (void*)coo_elem_offsets; 1524 ctx->SData_d.coo_elem_fullNb = (void*)coo_elem_fullNb; 1525 ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets; 1526 } else { 1527 ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL; 1528 ctx->SData_d.coo_elem_point_offsets = NULL; 1529 ctx->SData_d.coo_n_cellsTot = 0; 1530 } 1531 1532 ctx->SData_d.coo_max_fullnb = 0; 1533 for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1534 PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq; 1535 if (grid_batch_is_inv[grid]) { 1536 PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch)); 1537 } 1538 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1539 // make maps 1540 maps[grid].d_self = NULL; 1541 maps[grid].num_elements = numCells[grid]; 1542 maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q 1543 maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2 1544 maps[grid].num_reduced = 0; 1545 maps[grid].deviceType = ctx->deviceType; 1546 maps[grid].numgrids = ctx->num_grids; 1547 // count reduced and get 1548 PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx)); 1549 for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) { 1550 if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add 1551 for (int fieldA=0;fieldA<Nf[grid];fieldA++) { 1552 int fullNb = 0; 1553 for (int q = 0; q < Nb; ++q) { 1554 PetscInt numindices,*indices; 1555 PetscScalar *valuesOrig = elMat = elemMatrix; 1556 PetscCall(PetscArrayzero(elMat, totDim*totDim)); 1557 elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1; 1558 PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat)); 1559 for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal 1560 if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) { 1561 // found it 1562 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0 1563 if (plex_batch) { 1564 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]]; 1565 } else { 1566 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; 1567 } 1568 fullNb++; 1569 } else { //found a constraint 1570 int jj = 0; 1571 PetscReal sum = 0; 1572 const PetscInt ff = f; 1573 maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1 1574 1575 do { // constraints are continuous in Plex - exploit that here 1576 int ii; // get 'scale' 1577 for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value 1578 if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not 1579 pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]); 1580 } 1581 } 1582 sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic 1583 // get 'gid' 1584 if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps 1585 else { 1586 if (plex_batch) { 1587 pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]]; 1588 } else { 1589 pointMaps[maps[grid].num_reduced][jj].gid = indices[f]; 1590 } 1591 fullNb++; 1592 } 1593 } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end 1594 while (jj < maps[grid].num_face) { 1595 pointMaps[maps[grid].num_reduced][jj].scale = 0; 1596 pointMaps[maps[grid].num_reduced][jj].gid = -1; 1597 jj++; 1598 } 1599 if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug 1600 int d,f; 1601 PetscReal tmp = 0; 1602 PetscCall(PetscPrintf(PETSC_COMM_SELF,"\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n",eidx,q,fieldA,(double)sum,LANDAU_MAX_Q_FACE,maps[grid].num_face)); 1603 for (d = 0, tmp = 0; d < numindices; ++d) { 1604 if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD,"%3d) %3" PetscInt_FMT ": ",d,indices[d])); 1605 for (f = 0; f < numindices; ++f) { 1606 tmp += PetscRealPart(elMat[d*numindices + f]); 1607 } 1608 if (tmp!=0) PetscCall(PetscPrintf(ctx->comm," | %22.16e\n",(double)tmp)); 1609 } 1610 } 1611 maps[grid].num_reduced++; 1612 PetscCheck(maps[grid].num_reduced<MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %" PetscInt_FMT,maps[grid].num_reduced,MAP_BF_SIZE); 1613 } 1614 break; 1615 } 1616 } 1617 // cleanup 1618 PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat)); 1619 if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat)); 1620 } 1621 if (ctx->coo_assembly) { // setup COO assembly 1622 coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid 1623 if (fieldA==0) { // cache full Nb for this element, on this grid per species 1624 coo_elem_fullNb[glb_elem_idx] = fullNb; 1625 if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb; 1626 } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %d %d",coo_elem_fullNb[glb_elem_idx],fullNb); 1627 } 1628 } // field 1629 } // cell 1630 // allocate and copy point data maps[grid].gIdx[eidx][field][q] 1631 PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps)); 1632 for (int ej = 0; ej < maps[grid].num_reduced; ++ej) { 1633 for (int q = 0; q < maps[grid].num_face; ++q) { 1634 maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale; 1635 maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid; 1636 } 1637 } 1638 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1639 if (ctx->deviceType == LANDAU_KOKKOS) { 1640 PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // imples Kokkos does 1641 } // else could be CUDA 1642 #endif 1643 #if defined(PETSC_HAVE_CUDA) 1644 if (ctx->deviceType == LANDAU_CUDA) { 1645 PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid)); 1646 } 1647 #endif 1648 if (plex_batch) { 1649 PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch)); 1650 PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this 1651 } 1652 } /* grids */ 1653 // finish COO 1654 if (ctx->coo_assembly) { // setup COO assembly 1655 PetscInt *oor, *ooc; 1656 ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz; 1657 PetscCall(PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc)); 1658 for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1; 1659 // get 1660 for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1661 for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) { 1662 const int fullNb = coo_elem_fullNb[glb_elem_idx]; 1663 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps, They should be the same but this is just for COO storage 1664 coo_elem_point_offsets[glb_elem_idx][0] = 0; 1665 for (int f=0, cnt2=0;f<Nb;f++) { 1666 int idx = Idxs[f]; 1667 coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last 1668 if (idx >= 0) { 1669 cnt2++; 1670 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc 1671 } else { 1672 idx = -idx - 1; 1673 for (int q = 0 ; q < maps[grid].num_face; q++) { 1674 if (maps[grid].c_maps[idx][q].gid < 0) break; 1675 cnt2++; 1676 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc 1677 } 1678 } 1679 PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d",fullNb,cnt2); 1680 } 1681 PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb); 1682 } 1683 } 1684 // set 1685 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 1686 for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1687 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 1688 for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) { 1689 const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb; 1690 // set (i,j) 1691 for (int fieldA=0;fieldA<Nf[grid];fieldA++) { 1692 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0]; 1693 int rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; 1694 for (int f = 0; f < Nb; ++f) { 1695 const int nr = coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f]; 1696 if (nr==1) rows[0] = Idxs[f]; 1697 else { 1698 const int idx = -Idxs[f] - 1; 1699 for (int q = 0; q < nr; q++) { 1700 rows[q] = maps[grid].c_maps[idx][q].gid; 1701 } 1702 } 1703 for (int g = 0; g < Nb; ++g) { 1704 const int nc = coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g]; 1705 if (nc==1) cols[0] = Idxs[g]; 1706 else { 1707 const int idx = -Idxs[g] - 1; 1708 for (int q = 0; q < nc; q++) { 1709 cols[q] = maps[grid].c_maps[idx][q].gid; 1710 } 1711 } 1712 const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 1713 for (int q = 0, idx = idx0; q < nr; q++) { 1714 for (int d = 0; d < nc; d++, idx++) { 1715 oor[idx] = rows[q] + moffset; 1716 ooc[idx] = cols[d] + moffset; 1717 } 1718 } 1719 } 1720 } 1721 } 1722 } // cell 1723 } // grid 1724 } // batch 1725 PetscCall(MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc)); 1726 PetscCall(PetscFree2(oor,ooc)); 1727 } 1728 PetscCall(PetscFree(pointMaps)); 1729 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 1730 PetscCall(PetscContainerSetPointer(container, (void *)maps)); 1731 PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy)); 1732 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container)); 1733 PetscCall(PetscContainerDestroy(&container)); 1734 PetscCall(PetscLogEventEnd(ctx->events[2],0,0,0,0)); 1735 } // end GPU assembly 1736 { /* create static point data, Jacobian called first, only one vertex copy */ 1737 PetscReal *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a; 1738 PetscInt outer_ipidx, outer_ej,grid, nip_glb = 0; 1739 PetscFE fe; 1740 const PetscInt Nb = Nq; 1741 PetscCall(PetscLogEventBegin(ctx->events[7],0,0,0,0)); 1742 PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n")); 1743 for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid]; 1744 /* collect f data, first time is for Jacobian, but make mass now */ 1745 if (ctx->verbose > 0) { 1746 PetscInt ncells = 0, N; 1747 PetscCall(MatGetSize(ctx->J,&N,NULL)); 1748 for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid]; 1749 PetscCall(PetscPrintf(ctx->comm,"%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N)); 1750 } 1751 PetscCall(PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a)); 1752 if (dim==3) { 1753 PetscCall(PetscMalloc1(nip_glb,&zz)); 1754 } 1755 if (ctx->use_energy_tensor_trick) { 1756 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe)); 1757 PetscCall(PetscObjectSetName((PetscObject) fe, "energy")); 1758 } 1759 /* init each grids static data - no batch */ 1760 for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once) 1761 Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic 1762 PetscSection e_section; 1763 DM dmEnergy; 1764 PetscInt cStart, cEnd, ej; 1765 1766 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1767 // prep energy trick, get v^2 / 2 vector 1768 if (ctx->use_energy_tensor_trick) { 1769 PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f}; 1770 Vec glob_v2; 1771 PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))}; 1772 1773 PetscCall(DMClone(ctx->plex[grid], &dmEnergy)); 1774 PetscCall(PetscObjectSetName((PetscObject) dmEnergy, "energy")); 1775 PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe)); 1776 PetscCall(DMCreateDS(dmEnergy)); 1777 PetscCall(DMGetSection(dmEnergy, &e_section)); 1778 PetscCall(DMGetGlobalVector(dmEnergy,&glob_v2)); 1779 PetscCall(PetscObjectSetName((PetscObject) glob_v2, "trick")); 1780 c2_0[0] = &data[0]; 1781 PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2)); 1782 PetscCall(DMGetLocalVector(dmEnergy, &v2_2)); 1783 PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */ 1784 PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1785 PetscCall(DMGlobalToLocalEnd (dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1786 PetscCall(DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view")); 1787 PetscCall(VecViewFromOptions(glob_v2,NULL, "-energy_vec_view")); 1788 PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2)); 1789 } 1790 /* append part of the IP data for each grid */ 1791 for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) { 1792 PetscScalar *coefs = NULL; 1793 PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0); 1794 invJe = invJ_a + outer_ej*Nq*dim*dim; 1795 PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj)); 1796 if (ctx->use_energy_tensor_trick) { 1797 PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs)); 1798 } 1799 /* create static point data */ 1800 for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) { 1801 const PetscInt gidx = outer_ipidx; 1802 const PetscReal *invJ = &invJe[qj*dim*dim]; 1803 ww [gidx] = detJj[qj] * quadWeights[qj]; 1804 if (dim==2) ww [gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */ 1805 // get xx, yy, zz 1806 if (ctx->use_energy_tensor_trick) { 1807 double refSpaceDer[3],eGradPhi[3]; 1808 const PetscReal * const DD = Tf[0]->T[1]; 1809 const PetscReal *Dq = &DD[qj*Nb*dim]; 1810 for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0; 1811 for (int b = 0; b < Nb; ++b) { 1812 for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]); 1813 } 1814 xx[gidx] = 1e10; 1815 if (ctx->use_relativistic_corrections) { 1816 double dg2_c2 = 0; 1817 //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02; 1818 for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]); 1819 dg2_c2 *= (double)c02; 1820 if (dg2_c2 >= .999) { 1821 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1822 yy[gidx] = vj[qj * dim + 1]; 1823 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1824 PetscCall(PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",(double)PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx])); 1825 } else { 1826 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2); 1827 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact; 1828 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0 1829 } 1830 } 1831 if (xx[gidx] == 1e10) { 1832 for (int d = 0; d < dim; ++d) { 1833 for (int e = 0 ; e < dim; ++e) { 1834 eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e]; 1835 } 1836 } 1837 xx[gidx] = eGradPhi[0]; 1838 yy[gidx] = eGradPhi[1]; 1839 if (dim==3) zz[gidx] = eGradPhi[2]; 1840 } 1841 } else { 1842 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1843 yy[gidx] = vj[qj * dim + 1]; 1844 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1845 } 1846 } /* q */ 1847 if (ctx->use_energy_tensor_trick) { 1848 PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs)); 1849 } 1850 } /* ej */ 1851 if (ctx->use_energy_tensor_trick) { 1852 PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2)); 1853 PetscCall(DMDestroy(&dmEnergy)); 1854 } 1855 } /* grid */ 1856 if (ctx->use_energy_tensor_trick) { 1857 PetscCall(PetscFEDestroy(&fe)); 1858 } 1859 /* cache static data */ 1860 if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) { 1861 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS) 1862 PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 1863 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1864 for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) { 1865 invMass[ii] = ctx->m_0/ctx->masses[ii]; 1866 nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii]; 1867 nu_beta[ii] = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3); 1868 } 1869 } 1870 if (ctx->deviceType == LANDAU_CUDA) { 1871 #if defined(PETSC_HAVE_CUDA) 1872 PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1873 nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d)); 1874 #else 1875 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built"); 1876 #endif 1877 } else if (ctx->deviceType == LANDAU_KOKKOS) { 1878 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1879 PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1880 nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d)); 1881 #else 1882 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built"); 1883 #endif 1884 } 1885 #endif 1886 /* free */ 1887 PetscCall(PetscFree4(ww,xx,yy,invJ_a)); 1888 if (dim==3) PetscCall(PetscFree(zz)); 1889 } else { /* CPU version, just copy in, only use part */ 1890 ctx->SData_d.w = (void*)ww; 1891 ctx->SData_d.x = (void*)xx; 1892 ctx->SData_d.y = (void*)yy; 1893 ctx->SData_d.z = (void*)zz; 1894 ctx->SData_d.invJ = (void*)invJ_a; 1895 } 1896 PetscCall(PetscLogEventEnd(ctx->events[7],0,0,0,0)); 1897 } // initialize 1898 PetscFunctionReturn(0); 1899 } 1900 1901 /* < v, u > */ 1902 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1903 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1904 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1905 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1906 { 1907 g0[0] = 1.; 1908 } 1909 1910 /* < v, u > */ 1911 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1912 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1913 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1914 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1915 { 1916 static double ttt = 1; 1917 g0[0] = ttt++; 1918 } 1919 1920 /* < v, u > */ 1921 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1922 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1923 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1924 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1925 { 1926 g0[0] = 2.*PETSC_PI*x[0]; 1927 } 1928 1929 static PetscErrorCode MatrixNfDestroy(void *ptr) 1930 { 1931 PetscInt *nf = (PetscInt *)ptr; 1932 PetscFunctionBegin; 1933 PetscCall(PetscFree(nf)); 1934 PetscFunctionReturn(0); 1935 } 1936 1937 static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx) 1938 { 1939 PetscInt *idxs=NULL; 1940 Mat subM[LANDAU_MAX_GRIDS]; 1941 1942 PetscFunctionBegin; 1943 if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1944 PetscFunctionReturn(0); 1945 } 1946 // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' 1947 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1948 PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs)); 1949 } 1950 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 1951 const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid]; 1952 Mat gMat; 1953 DM massDM; 1954 PetscDS prob; 1955 Vec tvec; 1956 // get "mass" matrix for reordering 1957 PetscCall(DMClone(ctx->plex[grid], &massDM)); 1958 PetscCall(DMCopyFields(ctx->plex[grid], massDM)); 1959 PetscCall(DMCreateDS(massDM)); 1960 PetscCall(DMGetDS(massDM, &prob)); 1961 for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) { 1962 PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL)); 1963 } 1964 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 1965 PetscCall(DMSetFromOptions(massDM)); 1966 PetscCall(DMCreateMatrix(massDM, &gMat)); 1967 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 1968 PetscCall(MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 1969 PetscCall(MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 1970 PetscCall(DMCreateLocalVector(ctx->plex[grid],&tvec)); 1971 PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx)); 1972 PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view")); 1973 PetscCall(DMDestroy(&massDM)); 1974 PetscCall(VecDestroy(&tvec)); 1975 subM[grid] = gMat; 1976 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1977 MatOrderingType rtype = MATORDERINGRCM; 1978 IS isrow,isicol; 1979 PetscCall(MatGetOrdering(gMat,rtype,&isrow,&isicol)); 1980 PetscCall(ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid])); 1981 PetscCall(ISGetIndices(isrow, &values)); 1982 for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid 1983 #if !defined(LANDAU_SPECIES_MAJOR) 1984 PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N; 1985 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 1986 #else 1987 PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n; 1988 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 1989 #endif 1990 } 1991 PetscCall(ISRestoreIndices(isrow, &values)); 1992 PetscCall(ISDestroy(&isrow)); 1993 PetscCall(ISDestroy(&isicol)); 1994 } 1995 } 1996 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1997 PetscCall(ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is)); 1998 } 1999 // get a block matrix 2000 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2001 Mat B = subM[grid]; 2002 PetscInt nloc, nzl, colbuf[1024], row; 2003 PetscCall(MatGetSize(B, &nloc, NULL)); 2004 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2005 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2006 const PetscInt *cols; 2007 const PetscScalar *vals; 2008 for (int i=0 ; i<nloc ; i++) { 2009 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 2010 PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 2011 for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset; 2012 row = i + moffset; 2013 PetscCall(MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES)); 2014 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 2015 } 2016 } 2017 } 2018 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2019 PetscCall(MatDestroy(&subM[grid])); 2020 } 2021 PetscCall(MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY)); 2022 PetscCall(MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY)); 2023 2024 // debug 2025 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2026 Mat mat_block_order; 2027 PetscCall(MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order)); // use MatPermute 2028 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view")); 2029 PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view")); 2030 PetscCall(MatDestroy(&mat_block_order)); 2031 PetscCall(VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch)); 2032 PetscCall(VecDuplicate(X,&ctx->work_vec)); 2033 } 2034 2035 PetscFunctionReturn(0); 2036 } 2037 2038 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat); 2039 /*@C 2040 DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh 2041 2042 Collective on comm 2043 2044 Input Parameters: 2045 + comm - The MPI communicator 2046 . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver) 2047 - prefix - prefix for options (not tested) 2048 2049 Output Parameter: 2050 . pack - The DM object representing the mesh 2051 + X - A vector (user destroys) 2052 - J - Optional matrix (object destroys) 2053 2054 Level: beginner 2055 2056 .keywords: mesh 2057 .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()` 2058 @*/ 2059 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack) 2060 { 2061 LandauCtx *ctx; 2062 Vec Xsub[LANDAU_MAX_GRIDS]; 2063 IS grid_batch_is_inv[LANDAU_MAX_GRIDS]; 2064 2065 PetscFunctionBegin; 2066 PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported"); 2067 PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM); 2068 PetscCall(PetscNew(&ctx)); 2069 ctx->comm = comm; /* used for diagnostics and global errors */ 2070 /* process options */ 2071 PetscCall(ProcessOptions(ctx,prefix)); 2072 if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE; 2073 /* Create Mesh */ 2074 PetscCall(DMCompositeCreate(PETSC_COMM_SELF,pack)); 2075 PetscCall(PetscLogEventBegin(ctx->events[13],0,0,0,0)); 2076 PetscCall(PetscLogEventBegin(ctx->events[15],0,0,0,0)); 2077 PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR) 2078 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2079 /* create FEM */ 2080 PetscCall(SetupDS(ctx->plex[grid],dim,grid,ctx)); 2081 /* set initial state */ 2082 PetscCall(DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid])); 2083 PetscCall(PetscObjectSetName((PetscObject) Xsub[grid], "u_orig")); 2084 /* initial static refinement, no solve */ 2085 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx)); 2086 /* forest refinement - forest goes in (if forest), plex comes out */ 2087 if (ctx->use_p4est) { 2088 DM plex; 2089 PetscCall(adapt(grid,ctx,&Xsub[grid])); // forest goes in, plex comes out 2090 PetscCall(DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view")); // need to differentiate - todo 2091 PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view")); 2092 // convert to plex, all done with this level 2093 PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex)); 2094 PetscCall(DMDestroy(&ctx->plex[grid])); 2095 ctx->plex[grid] = plex; 2096 } 2097 #if !defined(LANDAU_SPECIES_MAJOR) 2098 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2099 #else 2100 for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid 2101 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2102 } 2103 #endif 2104 PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx)); 2105 } 2106 #if !defined(LANDAU_SPECIES_MAJOR) 2107 // stack the batched DMs, could do it all here!!! b_id=0 2108 for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) { 2109 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2110 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2111 } 2112 } 2113 #endif 2114 // create ctx->mat_offset 2115 ctx->mat_offset[0] = 0; 2116 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2117 PetscInt n; 2118 PetscCall(VecGetLocalSize(Xsub[grid],&n)); 2119 ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n; 2120 } 2121 // creat DM & Jac 2122 PetscCall(DMSetApplicationContext(*pack, ctx)); 2123 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 2124 PetscCall(DMSetFromOptions(*pack)); 2125 PetscCall(DMCreateMatrix(*pack, &ctx->J)); 2126 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 2127 PetscCall(MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2128 PetscCall(MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 2129 PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac")); 2130 // construct initial conditions in X 2131 PetscCall(DMCreateGlobalVector(*pack,X)); 2132 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2133 PetscInt n; 2134 PetscCall(VecGetLocalSize(Xsub[grid],&n)); 2135 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2136 PetscScalar const *values; 2137 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2138 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx)); 2139 PetscCall(VecGetArrayRead(Xsub[grid],&values)); 2140 for (int i=0, idx = moffset; i<n; i++, idx++) { 2141 PetscCall(VecSetValue(*X,idx,values[i],INSERT_VALUES)); 2142 } 2143 PetscCall(VecRestoreArrayRead(Xsub[grid],&values)); 2144 } 2145 } 2146 // cleanup 2147 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2148 PetscCall(VecDestroy(&Xsub[grid])); 2149 } 2150 /* check for correct matrix type */ 2151 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 2152 PetscBool flg; 2153 if (ctx->deviceType == LANDAU_CUDA) { 2154 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"")); 2155 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'"); 2156 } else if (ctx->deviceType == LANDAU_KOKKOS) { 2157 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"")); 2158 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 2159 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2160 #else 2161 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2162 #endif 2163 } 2164 } 2165 PetscCall(PetscLogEventEnd(ctx->events[15],0,0,0,0)); 2166 // create field major ordering 2167 2168 ctx->work_vec = NULL; 2169 ctx->plex_batch = NULL; 2170 ctx->batch_is = NULL; 2171 for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL; 2172 PetscCall(PetscLogEventBegin(ctx->events[12],0,0,0,0)); 2173 PetscCall(LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx)); 2174 PetscCall(PetscLogEventEnd(ctx->events[12],0,0,0,0)); 2175 2176 // create AMR GPU assembly maps and static GPU data 2177 PetscCall(CreateStaticGPUData(dim,grid_batch_is_inv,ctx)); 2178 2179 PetscCall(PetscLogEventEnd(ctx->events[13],0,0,0,0)); 2180 2181 // create mass matrix 2182 PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL)); 2183 2184 if (J) *J = ctx->J; 2185 2186 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2187 PetscContainer container; 2188 // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order 2189 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2190 PetscCall(PetscContainerSetPointer(container, (void *)ctx)); 2191 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container)); 2192 PetscCall(PetscContainerDestroy(&container)); 2193 // batch solvers need to map -- can batch solvers work 2194 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2195 PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch)); 2196 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container)); 2197 PetscCall(PetscContainerDestroy(&container)); 2198 } 2199 // for batch solvers 2200 { 2201 PetscContainer container; 2202 PetscInt *pNf; 2203 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2204 PetscCall(PetscMalloc1(sizeof(*pNf), &pNf)); 2205 *pNf = ctx->batch_sz; 2206 PetscCall(PetscContainerSetPointer(container, (void *)pNf)); 2207 PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy)); 2208 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container)); 2209 PetscCall(PetscContainerDestroy(&container)); 2210 } 2211 2212 PetscFunctionReturn(0); 2213 } 2214 2215 /*@ 2216 DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh 2217 2218 Collective on dm 2219 2220 Input/Output Parameters: 2221 . dm - the dm to destroy 2222 2223 Level: beginner 2224 2225 .keywords: mesh 2226 .seealso: `DMPlexLandauCreateVelocitySpace()` 2227 @*/ 2228 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm) 2229 { 2230 LandauCtx *ctx; 2231 PetscFunctionBegin; 2232 PetscCall(DMGetApplicationContext(*dm, &ctx)); 2233 PetscCall(MatDestroy(&ctx->M)); 2234 PetscCall(MatDestroy(&ctx->J)); 2235 for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii])); 2236 PetscCall(ISDestroy(&ctx->batch_is)); 2237 PetscCall(VecDestroy(&ctx->work_vec)); 2238 PetscCall(VecScatterDestroy(&ctx->plex_batch)); 2239 if (ctx->deviceType == LANDAU_CUDA) { 2240 #if defined(PETSC_HAVE_CUDA) 2241 PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d)); 2242 #else 2243 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda"); 2244 #endif 2245 } else if (ctx->deviceType == LANDAU_KOKKOS) { 2246 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 2247 PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d)); 2248 #else 2249 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos"); 2250 #endif 2251 } else { 2252 if (ctx->SData_d.x) { /* in a CPU run */ 2253 PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w; 2254 LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets; 2255 PetscCall(PetscFree4(ww,xx,yy,invJ)); 2256 if (zz) PetscCall(PetscFree(zz)); 2257 if (coo_elem_offsets) { 2258 PetscCall(PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets)); // could be NULL 2259 } 2260 } 2261 } 2262 2263 if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings 2264 PetscCall(PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE])); 2265 PetscCall(PetscPrintf(ctx->comm, "2: Solve: %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz)); 2266 PetscCall(PetscPrintf(ctx->comm, "3: Landau: %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL])); 2267 PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN])); 2268 PetscCall(PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n",ctx->times[LANDAU_OPERATOR])); 2269 PetscCall(PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n",ctx->times[LANDAU_MASS])); 2270 PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_F_DF])); 2271 PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_KERNEL])); 2272 PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n",ctx->times[KSP_FACTOR])); 2273 PetscCall(PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n",ctx->times[KSP_SOLVE])); 2274 } 2275 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2276 PetscCall(DMDestroy(&ctx->plex[grid])); 2277 } 2278 PetscFree(ctx); 2279 PetscCall(DMDestroy(dm)); 2280 PetscFunctionReturn(0); 2281 } 2282 2283 /* < v, ru > */ 2284 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2285 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2286 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2287 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2288 { 2289 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2290 f0[0] = u[ii]; 2291 } 2292 2293 /* < v, ru > */ 2294 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2295 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2296 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2297 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2298 { 2299 PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]); 2300 f0[0] = x[jj]*u[ii]; /* x momentum */ 2301 } 2302 2303 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2304 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2305 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2306 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2307 { 2308 PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]); 2309 double tmp1 = 0.; 2310 for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2311 f0[0] = tmp1*u[ii]; 2312 } 2313 2314 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) 2315 { 2316 const PetscReal *c2_0_arr = ((PetscReal*)actx); 2317 const PetscReal c02 = c2_0_arr[0]; 2318 2319 PetscFunctionBegin; 2320 for (int s = 0 ; s < Nf ; s++) { 2321 PetscReal tmp1 = 0.; 2322 for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2323 #if defined(PETSC_USE_DEBUG) 2324 u[s] = PetscSqrtReal(1. + tmp1/c02);// u[0] = PetscSqrtReal(1. + xx); 2325 #else 2326 { 2327 PetscReal xx = tmp1/c02; 2328 u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.) 2329 } 2330 #endif 2331 } 2332 PetscFunctionReturn(0); 2333 } 2334 2335 /* < v, ru > */ 2336 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2337 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2338 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2339 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2340 { 2341 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2342 f0[0] = 2.*PETSC_PI*x[0]*u[ii]; 2343 } 2344 2345 /* < v, ru > */ 2346 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2347 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2348 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2349 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2350 { 2351 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2352 f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii]; 2353 } 2354 2355 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2356 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2357 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2358 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2359 { 2360 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2361 f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii]; 2362 } 2363 2364 /*@ 2365 DMPlexLandauPrintNorms - collects moments and prints them 2366 2367 Collective on dm 2368 2369 Input Parameters: 2370 + X - the state 2371 - stepi - current step to print 2372 2373 Level: beginner 2374 2375 .keywords: mesh 2376 .seealso: `DMPlexLandauCreateVelocitySpace()` 2377 @*/ 2378 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi) 2379 { 2380 LandauCtx *ctx; 2381 PetscDS prob; 2382 DM pack; 2383 PetscInt cStart, cEnd, dim, ii, i0, nDMs; 2384 PetscScalar xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES]; 2385 PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES]; 2386 Vec *globXArray; 2387 2388 PetscFunctionBegin; 2389 PetscCall(VecGetDM(X, &pack)); 2390 PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM"); 2391 PetscCall(DMGetDimension(pack, &dim)); 2392 PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim); 2393 PetscCall(DMGetApplicationContext(pack, &ctx)); 2394 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2395 /* print momentum and energy */ 2396 PetscCall(DMCompositeGetNumberDM(pack,&nDMs)); 2397 PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz); 2398 PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray)); 2399 PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); 2400 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 2401 Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ]; 2402 PetscCall(DMGetDS(ctx->plex[grid], &prob)); 2403 for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) { 2404 PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]}; 2405 PetscCall(PetscDSSetConstants(prob, 2, user)); 2406 if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */ 2407 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden)); 2408 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2409 density[ii] = tt[0]*ctx->n_0*ctx->charges[ii]; 2410 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom)); 2411 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2412 zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2413 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2)); 2414 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2415 energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii]; 2416 zmomentumtot += zmomentum[ii]; 2417 energytot += energy[ii]; 2418 densitytot += density[ii]; 2419 PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii]))); 2420 } else { /* 2/3Xloc + 3V */ 2421 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den)); 2422 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2423 density[ii] = tt[0]*ctx->n_0*ctx->charges[ii]; 2424 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom)); 2425 user[1] = 0; 2426 PetscCall(PetscDSSetConstants(prob, 2, user)); 2427 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2428 xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2429 user[1] = 1; 2430 PetscCall(PetscDSSetConstants(prob, 2, user)); 2431 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2432 ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2433 user[1] = 2; 2434 PetscCall(PetscDSSetConstants(prob, 2, user)); 2435 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2436 zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2437 if (ctx->use_relativistic_corrections) { 2438 /* gamma * M * f */ 2439 if (ii==0 && grid==0) { // do all at once 2440 Vec Mf, globGamma, *globMfArray, *globGammaArray; 2441 PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f}; 2442 PetscReal *c2_0[1], data[1]; 2443 2444 PetscCall(VecDuplicate(X,&globGamma)); 2445 PetscCall(VecDuplicate(X,&Mf)); 2446 PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray)); 2447 PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray)); 2448 /* M * f */ 2449 PetscCall(MatMult(ctx->M,X,Mf)); 2450 /* gamma */ 2451 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2452 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching 2453 Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ]; 2454 data[0] = PetscSqr(C_0(ctx->v_0)); 2455 c2_0[0] = &data[0]; 2456 PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1)); 2457 } 2458 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2459 /* gamma * Mf */ 2460 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2461 PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2462 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice 2463 PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs; 2464 Vec Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2; 2465 // get each component 2466 PetscCall(VecGetSize(Mfsub,&N)); 2467 PetscCall(VecCreate(ctx->comm,&v1)); 2468 PetscCall(VecSetSizes(v1,PETSC_DECIDE,N/Nf)); 2469 PetscCall(VecCreate(ctx->comm,&v2)); 2470 PetscCall(VecSetSizes(v2,PETSC_DECIDE,N/Nf)); 2471 PetscCall(VecSetFromOptions(v1)); // ??? 2472 PetscCall(VecSetFromOptions(v2)); 2473 // get each component 2474 PetscCall(VecGetBlockSize(Gsub,&bs)); 2475 PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf); 2476 PetscCall(VecGetBlockSize(Mfsub,&bs)); 2477 PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf); 2478 for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) { 2479 PetscScalar val; 2480 PetscCall(VecStrideGather(Gsub,i,v1,INSERT_VALUES)); 2481 PetscCall(VecStrideGather(Mfsub,i,v2,INSERT_VALUES)); 2482 PetscCall(VecDot(v1,v2,&val)); 2483 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix]; 2484 } 2485 PetscCall(VecDestroy(&v1)); 2486 PetscCall(VecDestroy(&v2)); 2487 } /* grids */ 2488 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2489 PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2490 PetscCall(PetscFree(globGammaArray)); 2491 PetscCall(PetscFree(globMfArray)); 2492 PetscCall(VecDestroy(&globGamma)); 2493 PetscCall(VecDestroy(&Mf)); 2494 } 2495 } else { 2496 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2)); 2497 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2498 energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii]; 2499 } 2500 PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,(double)PetscRealPart(density[ii]),(double)PetscRealPart(xmomentum[ii]),(double)PetscRealPart(ymomentum[ii]),(double)PetscRealPart(zmomentum[ii]),(double)PetscRealPart(energy[ii]))); 2501 xmomentumtot += xmomentum[ii]; 2502 ymomentumtot += ymomentum[ii]; 2503 zmomentumtot += zmomentum[ii]; 2504 energytot += energy[ii]; 2505 densitytot += density[ii]; 2506 } 2507 if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n"); 2508 } 2509 } 2510 PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray)); 2511 PetscCall(PetscFree(globXArray)); 2512 /* totals */ 2513 PetscCall(DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd)); 2514 if (ctx->num_species>1) { 2515 if (dim==2) { 2516 PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart)); 2517 } else { 2518 PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart)); 2519 } 2520 } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart)); 2521 PetscCall(PetscPrintf(ctx->comm,"\n")); 2522 PetscFunctionReturn(0); 2523 } 2524 2525 /*@ 2526 DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian) 2527 2528 Collective on pack 2529 2530 Input Parameters: 2531 . pack - the DM object 2532 2533 Output Parameters: 2534 . Amat - The mass matrix (optional), mass matrix is added to the DM context 2535 2536 Level: beginner 2537 2538 .keywords: mesh 2539 .seealso: `DMPlexLandauCreateVelocitySpace()` 2540 @*/ 2541 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat) 2542 { 2543 DM mass_pack,massDM[LANDAU_MAX_GRIDS]; 2544 PetscDS prob; 2545 PetscInt ii,dim,N1=1,N2; 2546 LandauCtx *ctx; 2547 Mat packM,subM[LANDAU_MAX_GRIDS]; 2548 2549 PetscFunctionBegin; 2550 PetscValidHeaderSpecific(pack,DM_CLASSID,1); 2551 if (Amat) PetscValidPointer(Amat,2); 2552 PetscCall(DMGetApplicationContext(pack, &ctx)); 2553 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2554 PetscCall(PetscLogEventBegin(ctx->events[14],0,0,0,0)); 2555 PetscCall(DMGetDimension(pack, &dim)); 2556 PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack)); 2557 /* create pack mass matrix */ 2558 for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) { 2559 PetscCall(DMClone(ctx->plex[grid], &massDM[grid])); 2560 PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid])); 2561 PetscCall(DMCreateDS(massDM[grid])); 2562 PetscCall(DMGetDS(massDM[grid], &prob)); 2563 for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) { 2564 if (dim==3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL)); 2565 else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL)); 2566 } 2567 #if !defined(LANDAU_SPECIES_MAJOR) 2568 PetscCall(DMCompositeAddDM(mass_pack,massDM[grid])); 2569 #else 2570 for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid 2571 PetscCall(DMCompositeAddDM(mass_pack,massDM[grid])); 2572 } 2573 #endif 2574 PetscCall(DMCreateMatrix(massDM[grid], &subM[grid])); 2575 } 2576 #if !defined(LANDAU_SPECIES_MAJOR) 2577 // stack the batched DMs 2578 for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) { 2579 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2580 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2581 } 2582 } 2583 #endif 2584 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 2585 PetscCall(DMSetFromOptions(mass_pack)); 2586 PetscCall(DMCreateMatrix(mass_pack, &packM)); 2587 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 2588 PetscCall(MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2589 PetscCall(MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 2590 PetscCall(DMDestroy(&mass_pack)); 2591 /* make mass matrix for each block */ 2592 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2593 Vec locX; 2594 DM plex = massDM[grid]; 2595 PetscCall(DMGetLocalVector(plex, &locX)); 2596 /* Mass matrix is independent of the input, so no need to fill locX */ 2597 PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx)); 2598 PetscCall(DMRestoreLocalVector(plex, &locX)); 2599 PetscCall(DMDestroy(&massDM[grid])); 2600 } 2601 PetscCall(MatGetSize(ctx->J, &N1, NULL)); 2602 PetscCall(MatGetSize(packM, &N2, NULL)); 2603 PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2); 2604 /* assemble block diagonals */ 2605 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2606 Mat B = subM[grid]; 2607 PetscInt nloc, nzl, colbuf[1024], row; 2608 PetscCall(MatGetSize(B, &nloc, NULL)); 2609 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2610 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2611 const PetscInt *cols; 2612 const PetscScalar *vals; 2613 for (int i=0 ; i<nloc ; i++) { 2614 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 2615 PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 2616 for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset; 2617 row = i + moffset; 2618 PetscCall(MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES)); 2619 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 2620 } 2621 } 2622 } 2623 // cleanup 2624 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2625 PetscCall(MatDestroy(&subM[grid])); 2626 } 2627 PetscCall(MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY)); 2628 PetscCall(MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY)); 2629 PetscCall(PetscObjectSetName((PetscObject)packM, "mass")); 2630 PetscCall(MatViewFromOptions(packM,NULL,"-dm_landau_mass_view")); 2631 ctx->M = packM; 2632 if (Amat) *Amat = packM; 2633 PetscCall(PetscLogEventEnd(ctx->events[14],0,0,0,0)); 2634 PetscFunctionReturn(0); 2635 } 2636 2637 /*@ 2638 DMPlexLandauIFunction - TS residual calculation, confusingly this computes the Jacobian w/o mass 2639 2640 Collective on ts 2641 2642 Input Parameters: 2643 + TS - The time stepping context 2644 . time_dummy - current time (not used) 2645 . X - Current state 2646 . X_t - Time derivative of current state 2647 - actx - Landau context 2648 2649 Output Parameter: 2650 . F - The residual 2651 2652 Level: beginner 2653 2654 .keywords: mesh 2655 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()` 2656 @*/ 2657 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx) 2658 { 2659 LandauCtx *ctx=(LandauCtx*)actx; 2660 PetscInt dim; 2661 DM pack; 2662 #if defined(PETSC_HAVE_THREADSAFETY) 2663 double starttime, endtime; 2664 #endif 2665 PetscObjectState state; 2666 2667 PetscFunctionBegin; 2668 PetscCall(TSGetDM(ts,&pack)); 2669 PetscCall(DMGetApplicationContext(pack, &ctx)); 2670 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2671 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2672 PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0)); 2673 PetscCall(PetscLogEventBegin(ctx->events[0],0,0,0,0)); 2674 #if defined(PETSC_HAVE_THREADSAFETY) 2675 starttime = MPI_Wtime(); 2676 #endif 2677 PetscCall(DMGetDimension(pack, &dim)); 2678 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2679 if (state != ctx->norm_state) { 2680 PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n",(double)time_dummy, ctx->norm_state, state)); 2681 PetscCall(MatZeroEntries(ctx->J)); 2682 PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx)); 2683 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view")); 2684 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2685 ctx->norm_state = state; 2686 } else { 2687 PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n",state)); 2688 } 2689 /* mat vec for op */ 2690 PetscCall(MatMult(ctx->J,X,F)); /* C*f */ 2691 /* add time term */ 2692 if (X_t) PetscCall(MatMultAdd(ctx->M,X_t,F,F)); 2693 #if defined(PETSC_HAVE_THREADSAFETY) 2694 if (ctx->stage) { 2695 endtime = MPI_Wtime(); 2696 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2697 ctx->times[LANDAU_JACOBIAN] += (endtime - starttime); 2698 ctx->times[LANDAU_JACOBIAN_COUNT] += 1; 2699 } 2700 #endif 2701 PetscCall(PetscLogEventEnd(ctx->events[0],0,0,0,0)); 2702 PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0)); 2703 if (ctx->stage) { 2704 PetscCall(PetscLogStagePop()); 2705 #if defined(PETSC_HAVE_THREADSAFETY) 2706 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2707 #endif 2708 } 2709 PetscFunctionReturn(0); 2710 } 2711 2712 /*@ 2713 DMPlexLandauIJacobian - TS Jacobian construction, confusingly this adds mass 2714 2715 Collective on ts 2716 2717 Input Parameters: 2718 + TS - The time stepping context 2719 . time_dummy - current time (not used) 2720 . X - Current state 2721 . U_tdummy - Time derivative of current state (not used) 2722 . shift - shift for du/dt term 2723 - actx - Landau context 2724 2725 Output Parameters: 2726 + Amat - Jacobian 2727 - Pmat - same as Amat 2728 2729 Level: beginner 2730 2731 .keywords: mesh 2732 .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()` 2733 @*/ 2734 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx) 2735 { 2736 LandauCtx *ctx=NULL; 2737 PetscInt dim; 2738 DM pack; 2739 #if defined(PETSC_HAVE_THREADSAFETY) 2740 double starttime, endtime; 2741 #endif 2742 PetscObjectState state; 2743 2744 PetscFunctionBegin; 2745 PetscCall(TSGetDM(ts,&pack)); 2746 PetscCall(DMGetApplicationContext(pack, &ctx)); 2747 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2748 PetscCheck(Amat == Pmat && Amat == ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J"); 2749 PetscCall(DMGetDimension(pack, &dim)); 2750 /* get collision Jacobian into A */ 2751 if (ctx->stage) PetscCall(PetscLogStagePush(ctx->stage)); 2752 PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0)); 2753 PetscCall(PetscLogEventBegin(ctx->events[9],0,0,0,0)); 2754 #if defined(PETSC_HAVE_THREADSAFETY) 2755 starttime = MPI_Wtime(); 2756 #endif 2757 PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift)); 2758 PetscCheck(shift!=0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift"); 2759 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2760 PetscCheck(state == ctx->norm_state,ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "",ctx->norm_state,state); 2761 if (!ctx->use_matrix_mass) { 2762 PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx)); 2763 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view")); 2764 } else { /* add mass */ 2765 PetscCall(MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN)); 2766 } 2767 #if defined(PETSC_HAVE_THREADSAFETY) 2768 if (ctx->stage) { 2769 endtime = MPI_Wtime(); 2770 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2771 ctx->times[LANDAU_MASS] += (endtime - starttime); 2772 } 2773 #endif 2774 PetscCall(PetscLogEventEnd(ctx->events[9],0,0,0,0)); 2775 PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0)); 2776 if (ctx->stage) { 2777 PetscCall(PetscLogStagePop()); 2778 #if defined(PETSC_HAVE_THREADSAFETY) 2779 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2780 #endif 2781 } 2782 PetscFunctionReturn(0); 2783 } 2784