1 #include <../src/mat/impls/aij/seq/aij.h> 2 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 3 #include <petsclandau.h> /*I "petsclandau.h" I*/ 4 #include <petscts.h> 5 #include <petscdmforest.h> 6 #include <petscdmcomposite.h> 7 8 /* Landau collision operator */ 9 10 /* relativistic terms */ 11 #if defined(PETSC_USE_REAL_SINGLE) 12 #define SPEED_OF_LIGHT 2.99792458e8F 13 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */ 14 #else 15 #define SPEED_OF_LIGHT 2.99792458e8 16 #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */ 17 #endif 18 19 #define PETSC_THREAD_SYNC 20 #include "land_tensors.h" 21 22 #if defined(PETSC_HAVE_OPENMP) 23 #include <omp.h> 24 #endif 25 26 /* vector padding not supported */ 27 #define LANDAU_VL 1 28 29 static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y) 30 { 31 LandauCtx *ctx; 32 PetscContainer container; 33 34 PetscFunctionBegin; 35 PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container)); 36 if (container) { 37 PetscCall(PetscContainerGetPointer(container, (void **) &ctx)); 38 PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 39 PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 40 PetscCall((*ctx->seqaij_mult)(A,ctx->work_vec,y)); 41 PetscCall(VecCopy(y, ctx->work_vec)); 42 PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE)); 43 PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE)); 44 PetscFunctionReturn(0); 45 } 46 PetscCall(MatMult(A,x,y)); 47 PetscFunctionReturn(0); 48 } 49 50 // Computes v3 = v2 + A * v1. 51 static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3) 52 { 53 PetscFunctionBegin; 54 SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "?????"); 55 PetscCall(LandauMatMult(A,v1,v3)); 56 PetscCall(VecAYPX(v3,1,v2)); 57 PetscFunctionReturn(0); 58 } 59 60 static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y) 61 { 62 LandauCtx *ctx; 63 PetscContainer container; 64 65 PetscFunctionBegin; 66 PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container)); 67 if (container) { 68 PetscCall(PetscContainerGetPointer(container, (void **) &ctx)); 69 PetscCall(VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 70 PetscCall(VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 71 PetscCall((*ctx->seqaij_multtranspose)(A,ctx->work_vec,y)); 72 PetscCall(VecCopy(y, ctx->work_vec)); 73 PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE)); 74 PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE)); 75 PetscFunctionReturn(0); 76 } 77 PetscCall(MatMultTranspose(A,x,y)); 78 PetscFunctionReturn(0); 79 } 80 81 static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x) 82 { 83 LandauCtx *ctx; 84 PetscContainer container; 85 86 PetscFunctionBegin; 87 PetscCall(PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container)); 88 if (container) { 89 PetscCall(PetscContainerGetPointer(container, (void **) &ctx)); 90 PetscCall((*ctx->seqaij_getdiagonal)(A,ctx->work_vec)); 91 PetscCall(VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE)); 92 PetscCall(VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE)); 93 PetscFunctionReturn(0); 94 } 95 PetscCall(MatGetDiagonal(A, x)); 96 PetscFunctionReturn(0); 97 } 98 99 static PetscErrorCode LandauGPUMapsDestroy(void *ptr) 100 { 101 P4estVertexMaps *maps = (P4estVertexMaps*)ptr; 102 PetscFunctionBegin; 103 // free device data 104 if (maps[0].deviceType != LANDAU_CPU) { 105 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 106 if (maps[0].deviceType == LANDAU_KOKKOS) { 107 PetscCall(LandauKokkosDestroyMatMaps(maps, maps[0].numgrids)); // imples Kokkos does 108 } // else could be CUDA 109 #elif defined(PETSC_HAVE_CUDA) 110 if (maps[0].deviceType == LANDAU_CUDA) { 111 PetscCall(LandauCUDADestroyMatMaps(maps, maps[0].numgrids)); 112 } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %" PetscInt_FMT " ?????",maps->deviceType); 113 #endif 114 } 115 // free host data 116 for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) { 117 PetscCall(PetscFree(maps[grid].c_maps)); 118 PetscCall(PetscFree(maps[grid].gIdx)); 119 } 120 PetscCall(PetscFree(maps)); 121 122 PetscFunctionReturn(0); 123 } 124 static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 125 { 126 PetscReal v2 = 0; 127 PetscFunctionBegin; 128 /* compute v^2 / 2 */ 129 for (int i = 0; i < dim; ++i) v2 += x[i]*x[i]; 130 /* evaluate the Maxwellian */ 131 u[0] = v2/2; 132 PetscFunctionReturn(0); 133 } 134 135 /* needs double */ 136 static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 137 { 138 PetscReal *c2_0_arr = ((PetscReal*)actx); 139 double u2 = 0, c02 = (double)*c2_0_arr, xx; 140 141 PetscFunctionBegin; 142 /* compute u^2 / 2 */ 143 for (int i = 0; i < dim; ++i) u2 += x[i]*x[i]; 144 /* gamma - 1 = g_eps, for conditioning and we only take derivatives */ 145 xx = u2/c02; 146 #if defined(PETSC_USE_DEBUG) 147 u[0] = PetscSqrtReal(1. + xx); 148 #else 149 u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned. -1 might help condition and only used for derivative 150 #endif 151 PetscFunctionReturn(0); 152 } 153 154 /* 155 LandauFormJacobian_Internal - Evaluates Jacobian matrix. 156 157 Input Parameters: 158 . globX - input vector 159 . actx - optional user-defined context 160 . dim - dimension 161 162 Output Parameters: 163 . J0acP - Jacobian matrix filled, not created 164 */ 165 static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx) 166 { 167 LandauCtx *ctx = (LandauCtx*)a_ctx; 168 PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nb; 169 PetscQuadrature quad; 170 PetscReal Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2) 171 PetscScalar *cellClosure=NULL; 172 const PetscScalar *xdata=NULL; 173 PetscDS prob; 174 PetscContainer container; 175 P4estVertexMaps *maps; 176 Mat subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ]; 177 178 PetscFunctionBegin; 179 PetscValidHeaderSpecific(a_X,VEC_CLASSID,1); 180 PetscValidHeaderSpecific(JacP,MAT_CLASSID,2); 181 PetscValidPointer(ctx,5); 182 /* check for matrix container for GPU assembly. Support CPU assembly for debugging */ 183 PetscCheck(ctx->plex[0] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 184 PetscCall(PetscLogEventBegin(ctx->events[10],0,0,0,0)); 185 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 186 PetscCall(PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container)); 187 if (container) { 188 PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly"); 189 PetscCall(PetscContainerGetPointer(container, (void **) &maps)); 190 PetscCheck(maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container"); 191 for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL; 192 } else { 193 PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly"); 194 for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) { 195 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 196 PetscCall(DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ])); 197 } 198 } 199 maps = NULL; 200 } 201 // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck) 202 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 203 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); Nb = Nq; 204 PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ); 205 // get metadata for collecting dynamic data 206 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 207 PetscInt cStart, cEnd; 208 PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 209 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 210 numCells[grid] = cEnd - cStart; // grids can have different topology 211 } 212 PetscCall(PetscLogEventEnd(ctx->events[10],0,0,0,0)); 213 if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */ 214 DM pack; 215 PetscCall(VecGetDM(a_X, &pack)); 216 PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM"); 217 PetscCall(PetscLogEventBegin(ctx->events[1],0,0,0,0)); 218 for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) { 219 Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */ 220 if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */ 221 } 222 if (!ctx->gpu_assembly) { 223 Vec *locXArray,*globXArray; 224 PetscScalar *cellClosure_it; 225 PetscInt cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS]; 226 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 227 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 228 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 229 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 230 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 231 } 232 /* count cellClosure size */ 233 PetscCall(DMCompositeGetNumberDM(pack,&nDMs)); 234 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid]; 235 PetscCall(PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure)); 236 cellClosure_it = cellClosure; 237 PetscCall(PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray)); 238 PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray)); 239 PetscCall(DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 240 PetscCall(DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray)); 241 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once) 242 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 243 Vec locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2; 244 PetscInt cStart, cEnd, ei; 245 PetscCall(VecDuplicate(locX,&locX2)); 246 PetscCall(DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2)); 247 PetscCall(DMGlobalToLocalEnd (ctx->plex[grid], globX, INSERT_VALUES, locX2)); 248 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 249 for (ei = cStart ; ei < cEnd; ++ei) { 250 PetscScalar *coef = NULL; 251 PetscCall(DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 252 PetscCall(PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it))); /* change if LandauIPReal != PetscScalar */ 253 PetscCall(DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef)); 254 cellClosure_it += Nb*Nf[grid]; 255 } 256 PetscCall(VecDestroy(&locX2)); 257 } 258 } 259 PetscCheck(cellClosure_it-cellClosure == cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscInt_FMT " != cellClosure_sz = %" PetscInt_FMT,cellClosure_it-cellClosure,cellClosure_sz*ctx->batch_sz); 260 PetscCall(DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray)); 261 PetscCall(DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray)); 262 PetscCall(PetscFree(locXArray)); 263 PetscCall(PetscFree(globXArray)); 264 xdata = NULL; 265 } else { 266 PetscMemType mtype; 267 if (ctx->jacobian_field_major_order) { // get data in batch ordering 268 PetscCall(VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 269 PetscCall(VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD)); 270 PetscCall(VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype)); 271 } else { 272 PetscCall(VecGetArrayReadAndMemType(a_X,&xdata,&mtype)); 273 } 274 if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) { 275 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij"); 276 } 277 cellClosure = NULL; 278 } 279 PetscCall(PetscLogEventEnd(ctx->events[1],0,0,0,0)); 280 } else xdata = cellClosure = NULL; 281 282 /* do it */ 283 if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) { 284 if (ctx->deviceType == LANDAU_CUDA) { 285 #if defined(PETSC_HAVE_CUDA) 286 PetscCall(LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP)); 287 #else 288 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda"); 289 #endif 290 } else if (ctx->deviceType == LANDAU_KOKKOS) { 291 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 292 PetscCall(LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP)); 293 #else 294 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos"); 295 #endif 296 } 297 } else { /* CPU version */ 298 PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species 299 PetscInt ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS]; 300 PetscReal *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w; 301 PetscReal Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 302 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 303 PetscScalar *coo_vals=NULL; 304 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 305 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 306 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 307 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 308 } 309 /* count IPf size, etc */ 310 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 311 const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1]; 312 ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0; 313 for (PetscInt grid=0 ; grid<num_grids ; grid++) { 314 PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid]; 315 elem_offset[grid+1] = elem_offset[grid] + numCells[grid]; 316 ip_offset[grid+1] = ip_offset[grid] + numCells[grid]*Nq; 317 ipf_offset[grid+1] = ipf_offset[grid] + Nq*nfloc*numCells[grid]; 318 } 319 IPf_sz_glb = ipf_offset[num_grids]; 320 IPf_sz_tot = IPf_sz_glb*ctx->batch_sz; 321 // prep COO 322 if (ctx->coo_assembly) { 323 PetscCall(PetscMalloc1(ctx->SData_d.coo_size,&coo_vals)); // allocate every time? 324 PetscCall(PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n",ctx->SData_d.coo_size)); 325 } 326 if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */ 327 #if defined(PETSC_HAVE_THREADSAFETY) 328 double starttime, endtime; 329 starttime = MPI_Wtime(); 330 #endif 331 PetscCall(PetscLogEventBegin(ctx->events[8],0,0,0,0)); 332 for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) { 333 invMass[fieldA] = ctx->m_0/ctx->masses[fieldA]; 334 Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */ 335 if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */ 336 nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA]; 337 nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3); 338 } 339 PetscCall(PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz)); 340 // F df/dx 341 for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element 342 const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch 343 // find my grid: 344 PetscInt grid = 0; 345 while (b_elem_idx >= elem_offset[grid+1]) grid++; // yuck search for grid 346 { 347 const PetscInt loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid]; 348 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid]; 349 PetscScalar *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ]; 350 PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // ingJ is static data on batch 0 351 PetscInt b,f,q; 352 if (cellClosure) { 353 coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const 354 } else { 355 coef = coef_buff; 356 for (f = 0; f < loc_Nf; ++f) { 357 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0]; 358 for (b = 0; b < Nb; ++b) { 359 PetscInt idx = Idxs[b]; 360 if (idx >= 0) { 361 coef[f*Nb+b] = xdata[idx+moffset]; 362 } else { 363 idx = -idx - 1; 364 coef[f*Nb+b] = 0; 365 for (q = 0; q < maps[grid].num_face; q++) { 366 PetscInt id = maps[grid].c_maps[idx][q].gid; 367 PetscScalar scale = maps[grid].c_maps[idx][q].scale; 368 coef[f*Nb+b] += scale*xdata[id+moffset]; 369 } 370 } 371 } 372 } 373 } 374 /* get f and df */ 375 for (PetscInt qi = 0; qi < Nq; qi++) { 376 const PetscReal *invJ = &invJe[qi*dim*dim]; 377 const PetscReal *Bq = &BB[qi*Nb]; 378 const PetscReal *Dq = &DD[qi*Nb*dim]; 379 PetscReal u_x[LANDAU_DIM]; 380 /* get f & df */ 381 for (f = 0; f < loc_Nf; ++f) { 382 const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi; 383 PetscInt b, e; 384 PetscReal refSpaceDer[LANDAU_DIM]; 385 ff[idx] = 0.0; 386 for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0; 387 for (b = 0; b < Nb; ++b) { 388 const PetscInt cidx = b; 389 ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]); 390 for (int d = 0; d < dim; ++d) { 391 refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]); 392 } 393 } 394 for (int d = 0; d < LANDAU_DIM; ++d) { 395 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) { 396 u_x[d] += invJ[e*dim+d]*refSpaceDer[e]; 397 } 398 } 399 dudx[idx] = u_x[0]; 400 dudy[idx] = u_x[1]; 401 #if LANDAU_DIM==3 402 dudz[idx] = u_x[2]; 403 #endif 404 } 405 } // q 406 } // grid 407 } // grid*batch 408 PetscCall(PetscLogEventEnd(ctx->events[8],0,0,0,0)); 409 #if defined(PETSC_HAVE_THREADSAFETY) 410 endtime = MPI_Wtime(); 411 if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime); 412 #endif 413 } // Jacobian setup 414 // assemble Jacobian (or mass) 415 for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element 416 const PetscInt b_Nelem = elem_offset[num_grids]; 417 const PetscInt glb_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; 418 PetscInt grid = 0; 419 #if defined(PETSC_HAVE_THREADSAFETY) 420 double starttime, endtime; 421 starttime = MPI_Wtime(); 422 #endif 423 while (glb_elem_idx >= elem_offset[grid+1]) grid++; 424 { 425 const PetscInt loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid]; 426 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset), totDim = loc_Nf*Nq, elemMatSize = totDim*totDim; 427 PetscScalar *elemMat; 428 const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; 429 PetscCall(PetscMalloc1(elemMatSize, &elemMat)); 430 PetscCall(PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat))); 431 if (shift==0.0) { // Jacobian 432 PetscCall(PetscLogEventBegin(ctx->events[4],0,0,0,0)); 433 } else { // mass 434 PetscCall(PetscLogEventBegin(ctx->events[16],0,0,0,0)); 435 } 436 for (PetscInt qj = 0; qj < Nq; ++qj) { 437 const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq; 438 PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1 439 PetscInt d,d2,dp,d3,IPf_idx; 440 if (shift==0.0) { // Jacobian 441 const PetscReal * const invJj = &invJe[qj*dim*dim]; 442 PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM]; 443 const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb]; 444 // create g2 & g3 445 for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K 446 gg2_temp[d] = 0; 447 for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0; 448 } 449 /* inner beta reduction */ 450 IPf_idx = 0; 451 for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r 452 PetscInt nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r]; 453 for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) { 454 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) { 455 const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx]; 456 PetscReal temp1[3] = {0, 0, 0}, temp2 = 0; 457 #if LANDAU_DIM==2 458 PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 459 LandauTensor2D(vj, x, y, Ud, Uk, mask); 460 #else 461 PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.; 462 if (ctx->use_relativistic_corrections) { 463 LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0)); 464 } else { 465 LandauTensor3D(vj, x, y, z, U, mask); 466 } 467 #endif 468 for (int f = 0; f < Nfloc_r ; ++f) { 469 const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx; 470 temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 471 temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 472 #if LANDAU_DIM==3 473 temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off]; 474 #endif 475 temp2 += ff[idx]*nu_beta[f+f_off]; 476 } 477 temp1[0] *= wi; 478 temp1[1] *= wi; 479 #if LANDAU_DIM==3 480 temp1[2] *= wi; 481 #endif 482 temp2 *= wi; 483 #if LANDAU_DIM==2 484 for (d2 = 0; d2 < 2; d2++) { 485 for (d3 = 0; d3 < 2; ++d3) { 486 /* K = U * grad(f): g2=e: i,A */ 487 gg2_temp[d2] += Uk[d2][d3]*temp1[d3]; 488 /* D = -U * (I \kron (fx)): g3=f: i,j,A */ 489 gg3_temp[d2][d3] += Ud[d2][d3]*temp2; 490 } 491 } 492 #else 493 for (d2 = 0; d2 < 3; ++d2) { 494 for (d3 = 0; d3 < 3; ++d3) { 495 /* K = U * grad(f): g2 = e: i,A */ 496 gg2_temp[d2] += U[d2][d3]*temp1[d3]; 497 /* D = -U * (I \kron (fx)): g3 = f: i,j,A */ 498 gg3_temp[d2][d3] += U[d2][d3]*temp2; 499 } 500 } 501 #endif 502 } // qi 503 } // ei_r 504 IPf_idx += nip_loc_r*Nfloc_r; 505 } /* grid_r - IPs */ 506 PetscCheck(IPf_idx == IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %" PetscInt_FMT " %" PetscInt_FMT,IPf_idx,IPf_sz_glb); 507 // add alpha and put in gg2/3 508 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) { 509 for (d2 = 0; d2 < LANDAU_DIM; d2++) { 510 gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off]; 511 for (d3 = 0; d3 < LANDAU_DIM; d3++) { 512 gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off]; 513 } 514 } 515 } 516 /* add electric field term once per IP */ 517 for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) { 518 gg2[fieldA][LANDAU_DIM-1] += Eq_m[fieldA+f_off]; 519 } 520 /* Jacobian transform - g2, g3 */ 521 for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) { 522 for (d = 0; d < dim; ++d) { 523 g2[fieldA][d] = 0.0; 524 for (d2 = 0; d2 < dim; ++d2) { 525 g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2]; 526 g3[fieldA][d][d2] = 0.0; 527 for (d3 = 0; d3 < dim; ++d3) { 528 for (dp = 0; dp < dim; ++dp) { 529 g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp]; 530 } 531 } 532 g3[fieldA][d][d2] *= wj; 533 } 534 g2[fieldA][d] *= wj; 535 } 536 } 537 } else { // mass 538 PetscReal wj = ww[jpidx_glb]; 539 /* Jacobian transform - g0 */ 540 for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) { 541 if (dim==2) { 542 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0 543 } else { 544 g0[fieldA] = wj * shift; // move this to below and remove g0 545 } 546 } 547 } 548 /* FE matrix construction */ 549 { 550 PetscInt fieldA,d,f,d2,g; 551 const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim]; 552 /* assemble - on the diagonal (I,I) */ 553 for (fieldA = 0; fieldA < loc_Nf ; fieldA++) { 554 for (f = 0; f < Nb ; f++) { 555 const PetscInt i = fieldA*Nb + f; /* Element matrix row */ 556 for (g = 0; g < Nb; ++g) { 557 const PetscInt j = fieldA*Nb + g; /* Element matrix column */ 558 const PetscInt fOff = i*totDim + j; 559 if (shift==0.0) { 560 for (d = 0; d < dim; ++d) { 561 elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g]; 562 for (d2 = 0; d2 < dim; ++d2) { 563 elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2]; 564 } 565 } 566 } else { // mass 567 elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g]; 568 } 569 } 570 } 571 } 572 } 573 } /* qj loop */ 574 if (shift==0.0) { // Jacobian 575 PetscCall(PetscLogEventEnd(ctx->events[4],0,0,0,0)); 576 } else { 577 PetscCall(PetscLogEventEnd(ctx->events[16],0,0,0,0)); 578 } 579 #if defined(PETSC_HAVE_THREADSAFETY) 580 endtime = MPI_Wtime(); 581 if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime); 582 #endif 583 /* assemble matrix */ 584 if (!container) { 585 PetscInt cStart; 586 PetscCall(PetscLogEventBegin(ctx->events[6],0,0,0,0)); 587 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL)); 588 PetscCall(DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES)); 589 PetscCall(PetscLogEventEnd(ctx->events[6],0,0,0,0)); 590 } else { // GPU like assembly for debugging 591 PetscInt fieldA,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; 592 PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0}; 593 LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets; 594 /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */ 595 for (fieldA = 0; fieldA < loc_Nf ; fieldA++) { 596 LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0]; 597 for (f = 0; f < Nb ; f++) { 598 PetscInt idx = Idxs[f]; 599 if (idx >= 0) { 600 nr = 1; 601 rows0[0] = idx; 602 row_scale[0] = 1.; 603 } else { 604 idx = -idx - 1; 605 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) { 606 if (maps[grid].c_maps[idx][q].gid < 0) break; 607 rows0[q] = maps[grid].c_maps[idx][q].gid; 608 row_scale[q] = maps[grid].c_maps[idx][q].scale; 609 } 610 } 611 for (g = 0; g < Nb; ++g) { 612 idx = Idxs[g]; 613 if (idx >= 0) { 614 nc = 1; 615 cols0[0] = idx; 616 col_scale[0] = 1.; 617 } else { 618 idx = -idx - 1; 619 nc = maps[grid].num_face; 620 for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) { 621 if (maps[grid].c_maps[idx][q].gid < 0) break; 622 cols0[q] = maps[grid].c_maps[idx][q].gid; 623 col_scale[q] = maps[grid].c_maps[idx][q].scale; 624 } 625 } 626 const PetscInt i = fieldA*Nb + f; /* Element matrix row */ 627 const PetscInt j = fieldA*Nb + g; /* Element matrix column */ 628 const PetscScalar Aij = elemMat[i*totDim + j]; 629 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData 630 const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb; 631 const int idx0 = b_id*coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 632 for (int q = 0, idx2 = idx0; q < nr; q++) { 633 for (int d = 0; d < nc; d++, idx2++) { 634 coo_vals[idx2] = row_scale[q]*col_scale[d]*Aij; 635 } 636 } 637 } else { 638 for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset; 639 for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset; 640 for (q = 0; q < nr; q++) { 641 for (d = 0; d < nc; d++) { 642 vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij; 643 } 644 } 645 PetscCall(MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES)); 646 } 647 } 648 } 649 } 650 } 651 if (loc_elem==-1) { 652 PetscCall(PetscPrintf(ctx->comm,"CPU Element matrix\n")); 653 for (int d = 0; d < totDim; ++d) { 654 for (int f = 0; f < totDim; ++f) PetscCall(PetscPrintf(ctx->comm," %12.5e", PetscRealPart(elemMat[d*totDim + f]))); 655 PetscCall(PetscPrintf(ctx->comm,"\n")); 656 } 657 exit(12); 658 } 659 PetscCall(PetscFree(elemMat)); 660 } /* grid */ 661 } /* outer element & batch loop */ 662 if (shift==0.0) { // mass 663 PetscCall(PetscFree4(ff, dudx, dudy, dudz)); 664 } 665 if (!container) { // 'CPU' assembly move nest matrix to global JacP 666 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP 667 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 668 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid]; 669 PetscInt nloc, nzl, colbuf[1024], row; 670 const PetscInt *cols; 671 const PetscScalar *vals; 672 Mat B = subJ[ LAND_PACK_IDX(b_id,grid) ]; 673 PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY)); 674 PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY)); 675 PetscCall(MatGetSize(B, &nloc, NULL)); 676 for (int i=0 ; i<nloc ; i++) { 677 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 678 PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 679 for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j]; 680 row = moffset + i; 681 PetscCall(MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES)); 682 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 683 } 684 PetscCall(MatDestroy(&B)); 685 } 686 } 687 } 688 if (coo_vals) { 689 PetscCall(MatSetValuesCOO(JacP,coo_vals,ADD_VALUES)); 690 PetscCall(PetscFree(coo_vals)); 691 } 692 } /* CPU version */ 693 PetscCall(MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY)); 694 PetscCall(MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY)); 695 /* clean up */ 696 if (cellClosure) { 697 PetscCall(PetscFree(cellClosure)); 698 } 699 if (xdata) { 700 PetscCall(VecRestoreArrayReadAndMemType(a_X,&xdata)); 701 } 702 PetscFunctionReturn(0); 703 } 704 705 #if defined(LANDAU_ADD_BCS) 706 static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux, 707 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 708 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 709 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[]) 710 { 711 uexact[0] = 0; 712 } 713 #endif 714 715 #define MATVEC2(__a,__x,__p) {int i,j; for (i=0.; i<2; i++) {__p[i] = 0; for (j=0.; j<2; j++) __p[i] += __a[i][j]*__x[j]; }} 716 static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y, 717 PetscReal *outX, PetscReal *outY) 718 { 719 PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact; 720 if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) { 721 *outX = x; *outY = y; 722 } else { 723 const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr; 724 PetscReal cth,sth,xyprime[2],Rth[2][2],rotcos,newrr; 725 if (num_sections==2) { 726 rotcos = 0.70710678118654; 727 outfact = 1.5; efact = 2.5; 728 /* rotate normalized vector into [-pi/4,pi/4) */ 729 if (sinphi >= 0.) { /* top cell, -pi/2 */ 730 cth = 0.707106781186548; sth = -0.707106781186548; 731 } else { /* bottom cell -pi/8 */ 732 cth = 0.707106781186548; sth = .707106781186548; 733 } 734 } else if (num_sections==3) { 735 rotcos = 0.86602540378443; 736 outfact = 1.5; efact = 2.5; 737 /* rotate normalized vector into [-pi/6,pi/6) */ 738 if (sinphi >= 0.5) { /* top cell, -pi/3 */ 739 cth = 0.5; sth = -0.866025403784439; 740 } else if (sinphi >= -.5) { /* mid cell 0 */ 741 cth = 1.; sth = .0; 742 } else { /* bottom cell +pi/3 */ 743 cth = 0.5; sth = 0.866025403784439; 744 } 745 } else if (num_sections==4) { 746 rotcos = 0.9238795325112; 747 outfact = 1.5; efact = 3; 748 /* rotate normalized vector into [-pi/8,pi/8) */ 749 if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */ 750 cth = 0.38268343236509; sth = -0.923879532511287; 751 } else if (sinphi >= 0.) { /* mid top cell -pi/8 */ 752 cth = 0.923879532511287; sth = -.38268343236509; 753 } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */ 754 cth = 0.923879532511287; sth = 0.38268343236509; 755 } else { /* bottom cell + 3pi/8 */ 756 cth = 0.38268343236509; sth = .923879532511287; 757 } 758 } else { 759 cth = 0.; sth = 0.; rotcos = 0; efact = 0; 760 } 761 Rth[0][0] = cth; Rth[0][1] =-sth; 762 Rth[1][0] = sth; Rth[1][1] = cth; 763 MATVEC2(Rth,xy,xyprime); 764 if (num_sections==2) { 765 newrr = xyprime[0]/rotcos; 766 } else { 767 PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin; 768 PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax; 769 newrr = rin + routfrac*nroutmax; 770 } 771 *outX = cosphi*newrr; *outY = sinphi*newrr; 772 /* grade */ 773 PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY)); 774 if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */ 775 else { rs = r1; re = r2; fact = efact;} /* electron zone */ 776 tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr; 777 *outX *= tt; 778 *outY *= tt; 779 } 780 } 781 782 static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx) 783 { 784 LandauCtx *ctx = (LandauCtx*)a_ctx; 785 PetscReal r = abc[0], z = abc[1]; 786 if (ctx->inflate) { 787 PetscReal absR, absZ; 788 absR = PetscAbs(r); 789 absZ = PetscAbs(z); 790 CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on? 791 r = (r > 0) ? absR : -absR; 792 z = (z > 0) ? absZ : -absZ; 793 } 794 xyz[0] = r; 795 xyz[1] = z; 796 if (dim==3) xyz[2] = abc[2]; 797 798 PetscFunctionReturn(0); 799 } 800 801 /* create DMComposite of meshes for each species group */ 802 static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack) 803 { 804 PetscFunctionBegin; 805 { /* p4est, quads */ 806 /* Create plex mesh of Landau domain */ 807 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 808 PetscReal radius = ctx->radius[grid]; 809 if (!ctx->sphere) { 810 PetscInt cells[] = {2,2,2}; 811 PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius}; 812 DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE}; 813 if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; } 814 PetscCall(DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid])); // todo: make composite and create dm[grid] here 815 PetscCall(DMLocalizeCoordinates(ctx->plex[grid])); /* needed for periodic */ 816 if (dim==3) PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "cube")); 817 else PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane")); 818 } else if (dim==2) { // sphere is all wrong. should just have one inner radius 819 PetscInt numCells,cells[16][4],i,j; 820 PetscInt numVerts; 821 PetscReal inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius; 822 PetscReal *flatCoords = NULL; 823 PetscInt *flatCells = NULL, *pcell; 824 if (ctx->num_sections==2) { 825 #if 1 826 numCells = 5; 827 numVerts = 10; 828 int cells2[][4] = { {0,1,4,3}, 829 {1,2,5,4}, 830 {3,4,7,6}, 831 {4,5,8,7}, 832 {6,7,8,9} }; 833 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 834 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 835 { 836 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 837 for (j = 0; j < numVerts-1; j++) { 838 PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2; 839 PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid]; 840 z = rad * PetscSinReal(theta); 841 coords[j][1] = z; 842 r = rad * PetscCosReal(theta); 843 coords[j][0] = r; 844 } 845 coords[numVerts-1][0] = coords[numVerts-1][1] = 0; 846 } 847 #else 848 numCells = 4; 849 numVerts = 8; 850 static int cells2[][4] = {{0,1,2,3}, 851 {4,5,1,0}, 852 {5,6,2,1}, 853 {6,7,3,2}}; 854 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 855 PetscCall(loc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 856 { 857 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 858 PetscInt j; 859 for (j = 0; j < 8; j++) { 860 PetscReal z, r; 861 PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.; 862 PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0); 863 z = rad * PetscSinReal(theta); 864 coords[j][1] = z; 865 r = rad * PetscCosReal(theta); 866 coords[j][0] = r; 867 } 868 } 869 #endif 870 } else if (ctx->num_sections==3) { 871 numCells = 7; 872 numVerts = 12; 873 int cells2[][4] = { {0,1,5,4}, 874 {1,2,6,5}, 875 {2,3,7,6}, 876 {4,5,9,8}, 877 {5,6,10,9}, 878 {6,7,11,10}, 879 {8,9,10,11} }; 880 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 881 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 882 { 883 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 884 for (j = 0; j < numVerts; j++) { 885 PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3; 886 PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid]; 887 z = rad * PetscSinReal(theta); 888 coords[j][1] = z; 889 r = rad * PetscCosReal(theta); 890 coords[j][0] = r; 891 } 892 } 893 } else if (ctx->num_sections==4) { 894 numCells = 10; 895 numVerts = 16; 896 int cells2[][4] = { {0,1,6,5}, 897 {1,2,7,6}, 898 {2,3,8,7}, 899 {3,4,9,8}, 900 {5,6,11,10}, 901 {6,7,12,11}, 902 {7,8,13,12}, 903 {8,9,14,13}, 904 {10,11,12,15}, 905 {12,13,14,15}}; 906 for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j]; 907 PetscCall(PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells)); 908 { 909 PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords; 910 for (j = 0; j < numVerts-1; j++) { 911 PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4; 912 PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid]; 913 z = rad * PetscSinReal(theta); 914 coords[j][1] = z; 915 r = rad * PetscCosReal(theta); 916 coords[j][0] = r; 917 } 918 coords[numVerts-1][0] = coords[numVerts-1][1] = 0; 919 } 920 } else { 921 numCells = 0; 922 numVerts = 0; 923 } 924 for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) { 925 pcell[0] = cells[j][0]; pcell[1] = cells[j][1]; 926 pcell[2] = cells[j][2]; pcell[3] = cells[j][3]; 927 } 928 PetscCall(DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid])); 929 PetscCall(PetscFree2(flatCoords,flatCells)); 930 PetscCall(PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle")); 931 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes does not support cubed sphere"); 932 933 PetscCall(DMSetFromOptions(ctx->plex[grid])); 934 } // grid loop 935 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)pack,prefix)); 936 PetscCall(DMSetFromOptions(pack)); 937 938 { /* convert to p4est (or whatever), wait for discretization to create pack */ 939 char convType[256]; 940 PetscBool flg; 941 PetscErrorCode ierr; 942 943 ierr = PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");PetscCall(ierr); 944 PetscCall(PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg)); 945 ierr = PetscOptionsEnd();PetscCall(ierr); 946 if (flg) { 947 ctx->use_p4est = PETSC_TRUE; /* flag for Forest */ 948 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 949 DM dmforest; 950 PetscCall(DMConvert(ctx->plex[grid],convType,&dmforest)); 951 if (dmforest) { 952 PetscBool isForest; 953 PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix)); 954 PetscCall(DMIsForest(dmforest,&isForest)); 955 if (isForest) { 956 if (ctx->sphere && ctx->inflate) { 957 PetscCall(DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx)); 958 } 959 PetscCall(DMDestroy(&ctx->plex[grid])); 960 ctx->plex[grid] = dmforest; // Forest for adaptivity 961 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?"); 962 } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?"); 963 } 964 } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */ 965 } 966 } /* non-file */ 967 PetscCall(DMSetDimension(pack, dim)); 968 PetscCall(PetscObjectSetName((PetscObject) pack, "Mesh")); 969 PetscCall(DMSetApplicationContext(pack, ctx)); 970 971 PetscFunctionReturn(0); 972 } 973 974 static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx) 975 { 976 PetscInt ii,i0; 977 char buf[256]; 978 PetscSection section; 979 980 PetscFunctionBegin; 981 for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 982 if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "e")); 983 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii)); 984 /* Setup Discretization - FEM */ 985 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii])); 986 PetscCall(PetscObjectSetName((PetscObject) ctx->fe[ii], buf)); 987 PetscCall(DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii])); 988 } 989 PetscCall(DMCreateDS(ctx->plex[grid])); 990 PetscCall(DMGetSection(ctx->plex[grid], §ion)); 991 for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 992 if (ii==0) PetscCall(PetscSNPrintf(buf, sizeof(buf), "se")); 993 else PetscCall(PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii)); 994 PetscCall(PetscSectionSetComponentName(section, i0, 0, buf)); 995 } 996 PetscFunctionReturn(0); 997 } 998 999 /* Define a Maxwellian function for testing out the operator. */ 1000 1001 /* Using cartesian velocity space coordinates, the particle */ 1002 /* density, [1/m^3], is defined according to */ 1003 1004 /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */ 1005 1006 /* Using some constant, c, we normalize the velocity vector into a */ 1007 /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */ 1008 1009 /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */ 1010 1011 /* Defining $\theta=2T/mc^2$, we thus find that the probability density */ 1012 /* for finding the particle within the interval in a box dx^3 around x is */ 1013 1014 /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */ 1015 1016 typedef struct { 1017 PetscReal v_0; 1018 PetscReal kT_m; 1019 PetscReal n; 1020 PetscReal shift; 1021 } MaxwellianCtx; 1022 1023 static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx) 1024 { 1025 MaxwellianCtx *mctx = (MaxwellianCtx*)actx; 1026 PetscInt i; 1027 PetscReal v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */ 1028 PetscFunctionBegin; 1029 /* compute the exponents, v^2 */ 1030 for (i = 0; i < dim; ++i) v2 += x[i]*x[i]; 1031 /* evaluate the Maxwellian */ 1032 u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta)); 1033 if (mctx->shift!=0.) { 1034 v2 = 0; 1035 for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i]; 1036 v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift); 1037 /* evaluate the shifted Maxwellian */ 1038 u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta)); 1039 } 1040 PetscFunctionReturn(0); 1041 } 1042 1043 /*@ 1044 DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state 1045 1046 Collective on X 1047 1048 Input Parameters: 1049 . dm - The mesh (local) 1050 + time - Current time 1051 - temps - Temperatures of each species (global) 1052 . ns - Number density of each species (global) 1053 - grid - index into current grid - just used for offset into temp and ns 1054 + actx - Landau context 1055 1056 Output Parameter: 1057 . X - The state (local to this grid) 1058 1059 Level: beginner 1060 1061 .keywords: mesh 1062 .seealso: DMPlexLandauCreateVelocitySpace() 1063 @*/ 1064 PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx) 1065 { 1066 LandauCtx *ctx = (LandauCtx*)actx; 1067 PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *); 1068 PetscInt dim; 1069 MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES]; 1070 1071 PetscFunctionBegin; 1072 PetscCall(DMGetDimension(dm, &dim)); 1073 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 1074 for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) { 1075 mctxs[i0] = &data[i0]; 1076 data[i0].v_0 = ctx->v_0; // v_0 same for all grids 1077 data[i0].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */ 1078 data[i0].n = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] use for Conner-Hastie 1079 initu[i0] = maxwellian; 1080 data[i0].shift = 0; 1081 } 1082 data[0].shift = ctx->electronShift; 1083 /* need to make ADD_ALL_VALUES work - TODO */ 1084 PetscCall(DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X)); 1085 PetscFunctionReturn(0); 1086 } 1087 1088 /* 1089 LandauSetInitialCondition - Addes Maxwellians with context 1090 1091 Collective on X 1092 1093 Input Parameters: 1094 . dm - The mesh 1095 - grid - index into current grid - just used for offset into temp and ns 1096 + actx - Landau context with T and n 1097 1098 Output Parameter: 1099 . X - The state 1100 1101 Level: beginner 1102 1103 .keywords: mesh 1104 .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauAddMaxwellians() 1105 */ 1106 static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx) 1107 { 1108 LandauCtx *ctx = (LandauCtx*)actx; 1109 PetscFunctionBegin; 1110 if (!ctx) PetscCall(DMGetApplicationContext(dm, &ctx)); 1111 PetscCall(VecZeroEntries(X)); 1112 PetscCall(DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx)); 1113 PetscFunctionReturn(0); 1114 } 1115 1116 // adapt a level once. Forest in/out 1117 static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest) 1118 { 1119 DM forest, plex, adaptedDM = NULL; 1120 PetscDS prob; 1121 PetscBool isForest; 1122 PetscQuadrature quad; 1123 PetscInt Nq, *Nb, cStart, cEnd, c, dim, qj, k; 1124 DMLabel adaptLabel = NULL; 1125 1126 PetscFunctionBegin; 1127 forest = ctx->plex[grid]; 1128 PetscCall(DMCreateDS(forest)); 1129 PetscCall(DMGetDS(forest, &prob)); 1130 PetscCall(DMGetDimension(forest, &dim)); 1131 PetscCall(DMIsForest(forest, &isForest)); 1132 PetscCheck(isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"! Forest"); 1133 PetscCall(DMConvert(forest, DMPLEX, &plex)); 1134 PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd)); 1135 PetscCall(DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel)); 1136 PetscCall(PetscFEGetQuadrature(fem, &quad)); 1137 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL)); 1138 PetscCheck(Nq <=LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ); 1139 PetscCall(PetscDSGetDimensions(prob, &Nb)); 1140 if (type==4) { 1141 for (c = cStart; c < cEnd; c++) { 1142 PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 1143 } 1144 PetscCall(PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM")); 1145 } else if (type==2) { 1146 PetscInt rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2; 1147 PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY; 1148 for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; } 1149 for (c = cStart; c < cEnd; c++) { 1150 PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ]; 1151 PetscCall(DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ)); 1152 for (qj = 0; qj < Nq; ++qj) { 1153 tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0)); 1154 r = PetscSqrtReal(tt); 1155 if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) { 1156 minRad = r; 1157 nr = 0; 1158 rCellIdx[nr++]= c; 1159 PetscCall(PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", r, c, qj+1, Nq)); 1160 } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) { 1161 for (k=0;k<nr;k++) if (c == rCellIdx[k]) break; 1162 if (k==nr) { 1163 rCellIdx[nr++]= c; 1164 PetscCall(PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", r, c, qj+1, Nq, r-minRad)); 1165 } 1166 } 1167 if (ctx->sphere) { 1168 if ((tt=r-ctx->e_radius) > 0) { 1169 PetscCall(PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n",c,tt)); 1170 if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) { 1171 eMinRad = tt; 1172 eMaxIdx = 0; 1173 eCellIdx[eMaxIdx++] = c; 1174 } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) { 1175 eCellIdx[eMaxIdx++] = c; 1176 } 1177 } 1178 if ((tt=r-ctx->i_radius[grid]) > 0) { 1179 if (tt < iMinRad - 1.e-5) { 1180 iMinRad = tt; 1181 iMaxIdx = 0; 1182 iCellIdx[iMaxIdx++] = c; 1183 } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) { 1184 iCellIdx[iMaxIdx++] = c; 1185 } 1186 } 1187 } 1188 } 1189 } 1190 for (k=0;k<nr;k++) { 1191 PetscCall(DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE)); 1192 } 1193 if (ctx->sphere) { 1194 for (c = 0; c < eMaxIdx; c++) { 1195 PetscCall(DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE)); 1196 PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad)); 1197 } 1198 for (c = 0; c < iMaxIdx; c++) { 1199 PetscCall(DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE)); 1200 PetscCall(PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad)); 1201 } 1202 } 1203 PetscCall(PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad)); 1204 } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */ 1205 PetscScalar *coef = NULL; 1206 Vec coords; 1207 PetscInt csize,Nv,d,nz; 1208 DM cdm; 1209 PetscSection cs; 1210 PetscCall(DMGetCoordinatesLocal(forest, &coords)); 1211 PetscCall(DMGetCoordinateDM(forest, &cdm)); 1212 PetscCall(DMGetLocalSection(cdm, &cs)); 1213 for (c = cStart; c < cEnd; c++) { 1214 PetscInt doit = 0, outside = 0; 1215 PetscCall(DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef)); 1216 Nv = csize/dim; 1217 for (nz = d = 0; d < Nv; d++) { 1218 PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0); 1219 x = PetscSqrtReal(x); 1220 if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1; /* refine origin */ 1221 else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */ 1222 else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */ 1223 else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */ 1224 if (x < PETSC_MACHINE_EPSILON*10.) nz++; 1225 } 1226 PetscCall(DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef)); 1227 if (doit || (outside<Nv && nz)) { 1228 PetscCall(DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE)); 1229 } 1230 } 1231 PetscCall(PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM")); 1232 } 1233 PetscCall(DMDestroy(&plex)); 1234 PetscCall(DMAdaptLabel(forest, adaptLabel, &adaptedDM)); 1235 PetscCall(DMLabelDestroy(&adaptLabel)); 1236 *newForest = adaptedDM; 1237 if (adaptedDM) { 1238 if (isForest) { 1239 PetscCall(DMForestSetAdaptivityForest(adaptedDM,NULL)); // ???? 1240 } else exit(33); // ??????? 1241 PetscCall(DMConvert(adaptedDM, DMPLEX, &plex)); 1242 PetscCall(DMPlexGetHeightStratum(plex,0,&cStart,&cEnd)); 1243 PetscCall(PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart))); 1244 PetscCall(DMDestroy(&plex)); 1245 } else *newForest = NULL; 1246 PetscFunctionReturn(0); 1247 } 1248 1249 // forest goes in (ctx->plex[grid]), plex comes out 1250 static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu) 1251 { 1252 PetscInt adaptIter; 1253 1254 PetscFunctionBegin; 1255 PetscInt type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]}; 1256 for (type=0;type<5;type++) { 1257 for (adaptIter = 0; adaptIter<limits[type];adaptIter++) { 1258 DM newForest = NULL; 1259 PetscCall(adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest)); 1260 if (newForest) { 1261 PetscCall(DMDestroy(&ctx->plex[grid])); 1262 PetscCall(VecDestroy(uu)); 1263 PetscCall(DMCreateGlobalVector(newForest,uu)); 1264 PetscCall(PetscObjectSetName((PetscObject) *uu, "uAMR")); 1265 PetscCall(LandauSetInitialCondition(newForest, *uu, grid, 0, ctx)); 1266 ctx->plex[grid] = newForest; 1267 } else { 1268 exit(4); // can happen with no AMR and post refinement 1269 } 1270 } 1271 } 1272 PetscFunctionReturn(0); 1273 } 1274 1275 static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[]) 1276 { 1277 PetscErrorCode ierr; 1278 PetscBool flg, sph_flg; 1279 PetscInt ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS]; 1280 PetscReal v0_grid[LANDAU_MAX_GRIDS]; 1281 DM dummy; 1282 1283 PetscFunctionBegin; 1284 PetscCall(DMCreate(ctx->comm,&dummy)); 1285 /* get options - initialize context */ 1286 ctx->verbose = 1; // should be 0 for silent compliance 1287 #if defined(PETSC_HAVE_THREADSAFETY) 1288 ctx->batch_sz = PetscNumOMPThreads; 1289 #else 1290 ctx->batch_sz = 1; 1291 #endif 1292 ctx->batch_view_idx = 0; 1293 ctx->interpolate = PETSC_TRUE; 1294 ctx->gpu_assembly = PETSC_TRUE; 1295 ctx->norm_state = 0; 1296 ctx->electronShift = 0; 1297 ctx->M = NULL; 1298 ctx->J = NULL; 1299 /* geometry and grids */ 1300 ctx->sphere = PETSC_FALSE; 1301 ctx->inflate = PETSC_FALSE; 1302 ctx->use_p4est = PETSC_FALSE; 1303 ctx->num_sections = 3; /* 2, 3 or 4 */ 1304 for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) { 1305 ctx->radius[grid] = 5.; /* thermal radius (velocity) */ 1306 ctx->numAMRRefine[grid] = 5; 1307 ctx->postAMRRefine[grid] = 0; 1308 ctx->species_offset[grid+1] = 1; // one species default 1309 num_species_grid[grid] = 0; 1310 ctx->plex[grid] = NULL; /* cache as expensive to Convert */ 1311 } 1312 ctx->species_offset[0] = 0; 1313 ctx->re_radius = 0.; 1314 ctx->vperp0_radius1 = 0; 1315 ctx->vperp0_radius2 = 0; 1316 ctx->nZRefine1 = 0; 1317 ctx->nZRefine2 = 0; 1318 ctx->numRERefine = 0; 1319 num_species_grid[0] = 1; // one species default 1320 /* species - [0] electrons, [1] one ion species eg, duetarium, [2] heavy impurity ion, ... */ 1321 ctx->charges[0] = -1; /* electron charge (MKS) */ 1322 ctx->masses[0] = 1/1835.469965278441013; /* temporary value in proton mass */ 1323 ctx->n[0] = 1; 1324 ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */ 1325 ctx->thermal_temps[0] = 1; 1326 /* constants, etc. */ 1327 ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */ 1328 ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */ 1329 ctx->lnLam = 10; /* cross section ratio large - small angle collisions */ 1330 ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */ 1331 ctx->Ez = 0; 1332 for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0; 1333 ctx->use_matrix_mass = PETSC_FALSE; 1334 ctx->use_relativistic_corrections = PETSC_FALSE; 1335 ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */ 1336 ctx->SData_d.w = NULL; 1337 ctx->SData_d.x = NULL; 1338 ctx->SData_d.y = NULL; 1339 ctx->SData_d.z = NULL; 1340 ctx->SData_d.invJ = NULL; 1341 ctx->jacobian_field_major_order = PETSC_FALSE; 1342 ctx->SData_d.coo_elem_offsets = NULL; 1343 ctx->SData_d.coo_elem_point_offsets = NULL; 1344 ctx->coo_assembly = PETSC_FALSE; 1345 ctx->SData_d.coo_elem_fullNb = NULL; 1346 ctx->SData_d.coo_size = 0; 1347 ierr = PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");PetscCall(ierr); 1348 { 1349 char opstring[256]; 1350 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1351 ctx->deviceType = LANDAU_KOKKOS; 1352 PetscCall(PetscStrcpy(opstring,"kokkos")); 1353 #elif defined(PETSC_HAVE_CUDA) 1354 ctx->deviceType = LANDAU_CUDA; 1355 PetscCall(PetscStrcpy(opstring,"cuda")); 1356 #else 1357 ctx->deviceType = LANDAU_CPU; 1358 PetscCall(PetscStrcpy(opstring,"cpu")); 1359 #endif 1360 PetscCall(PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,sizeof(opstring),NULL)); 1361 PetscCall(PetscStrcmp("cpu",opstring,&flg)); 1362 if (flg) { 1363 ctx->deviceType = LANDAU_CPU; 1364 } else { 1365 PetscCall(PetscStrcmp("cuda",opstring,&flg)); 1366 if (flg) { 1367 ctx->deviceType = LANDAU_CUDA; 1368 } else { 1369 PetscCall(PetscStrcmp("kokkos",opstring,&flg)); 1370 if (flg) ctx->deviceType = LANDAU_KOKKOS; 1371 else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring); 1372 } 1373 } 1374 } 1375 PetscCall(PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL)); 1376 PetscCall(PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL)); 1377 PetscCall(PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL)); 1378 PetscCheck(LANDAU_MAX_BATCH_SZ >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %" PetscInt_FMT " < ctx->batch_sz %" PetscInt_FMT,LANDAU_MAX_BATCH_SZ,ctx->batch_sz); 1379 PetscCall(PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL)); 1380 PetscCheck(ctx->batch_view_idx < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-ctx->batch_view_idx %" PetscInt_FMT " > ctx->batch_sz %" PetscInt_FMT,ctx->batch_view_idx,ctx->batch_sz); 1381 PetscCall(PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in unites of Conner-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL)); 1382 PetscCall(PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL)); 1383 PetscCall(PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL)); 1384 PetscCall(PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL)); 1385 PetscCall(PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL)); 1386 PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL)); 1387 1388 /* get num species with temperature, set defaults */ 1389 for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) { 1390 ctx->thermal_temps[ii] = 1; 1391 ctx->charges[ii] = 1; 1392 ctx->masses[ii] = 1; 1393 ctx->n[ii] = 1; 1394 } 1395 nt = LANDAU_MAX_SPECIES; 1396 PetscCall(PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg)); 1397 if (flg) { 1398 PetscCall(PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n",nt)); 1399 ctx->num_species = nt; 1400 } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps ,t1,t2,.. must be provided to set the number of species"); 1401 for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */ 1402 nm = LANDAU_MAX_SPECIES-1; 1403 PetscCall(PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg)); 1404 if (flg && nm != ctx->num_species-1) { 1405 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species-1); 1406 } 1407 nm = LANDAU_MAX_SPECIES; 1408 PetscCall(PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg)); 1409 PetscCheckFalse(flg && nm != ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %" PetscInt_FMT " != num species %" PetscInt_FMT "",nm,ctx->num_species); 1410 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */ 1411 ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */ 1412 ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */ 1413 nc = LANDAU_MAX_SPECIES-1; 1414 PetscCall(PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg)); 1415 if (flg) PetscCheck(nc == ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %" PetscInt_FMT " != num species %" PetscInt_FMT,nc,ctx->num_species-1); 1416 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */ 1417 /* geometry and grids */ 1418 nt = LANDAU_MAX_GRIDS; 1419 PetscCall(PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg)); 1420 if (flg) { 1421 ctx->num_grids = nt; 1422 for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii]; 1423 PetscCheck(ctx->num_species == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %" PetscInt_FMT " != num_species = %" PetscInt_FMT ". %" PetscInt_FMT " grids (check that number of grids <= LANDAU_MAX_GRIDS = %" PetscInt_FMT ")",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS); 1424 } else { 1425 ctx->num_grids = 1; // go back to a single grid run 1426 num_species_grid[0] = ctx->num_species; 1427 } 1428 for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii]; 1429 PetscCheck(ctx->species_offset[ctx->num_grids] == ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %" PetscInt_FMT " != ctx->num_species = %" PetscInt_FMT " ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species); 1430 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1431 int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid 1432 v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */ 1433 } 1434 ii = 0; 1435 PetscCall(PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL)); 1436 ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */ 1437 ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */ 1438 /* domain */ 1439 nt = LANDAU_MAX_GRIDS; 1440 PetscCall(PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg)); 1441 if (flg) PetscCheck(nt >= ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %" PetscInt_FMT " radius != number grids %" PetscInt_FMT,nt,ctx->num_grids); 1442 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1443 if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */ 1444 if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75; 1445 else ctx->radius[grid] = -ctx->radius[grid]; 1446 ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid) 1447 PetscCall(PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n",ctx->radius[grid],grid)); 1448 } 1449 ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0 1450 } 1451 /* amr parametres */ 1452 nt = LANDAU_MAX_GRIDS; 1453 PetscCall(PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg)); 1454 PetscCheckFalse(flg && nt < ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %" PetscInt_FMT " != number grids %" PetscInt_FMT,nt,ctx->num_grids); 1455 nt = LANDAU_MAX_GRIDS; 1456 PetscCall(PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg)); 1457 for (ii=1;ii<ctx->num_grids;ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now 1458 PetscCall(PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg)); 1459 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg)); 1460 PetscCall(PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg)); 1461 PetscCall(PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg)); 1462 PetscCall(PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg)); 1463 PetscCall(PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg)); 1464 /* spherical domain (not used) */ 1465 PetscCall(PetscOptionsInt("-dm_landau_num_sections", "Number of tangential section in (2D) grid, 2, 3, of 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL)); 1466 PetscCall(PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg)); 1467 PetscCall(PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg)); 1468 PetscCall(PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg)); 1469 if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an e radius but did not set sphere, user error really */ 1470 if (!flg) { 1471 ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0; 1472 } 1473 nt = LANDAU_MAX_GRIDS; 1474 PetscCall(PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg)); 1475 if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; 1476 if (!flg) { 1477 ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain 1478 } 1479 if (flg) PetscCheck(ctx->num_grids == nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %" PetscInt_FMT " != num_species = %" PetscInt_FMT,nt,ctx->num_grids); 1480 if (ctx->sphere) PetscCheck(ctx->e_radius > ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius[0],ctx->e_radius,ctx->radius[0]); 1481 /* processing options */ 1482 PetscCall(PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL)); 1483 if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // make Kokkos 1484 PetscCall(PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL)); 1485 if (ctx->coo_assembly) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"COO assembly requires 'gpu assembly' even if Kokkos 'CPU' back-end %d",ctx->coo_assembly); 1486 } 1487 PetscCall(PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL)); 1488 if (ctx->jacobian_field_major_order) PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly"); 1489 ierr = PetscOptionsEnd();PetscCall(ierr); 1490 1491 for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0; 1492 if (ctx->verbose > 0) { 1493 PetscCall(PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0)); 1494 PetscCall(PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0)); 1495 PetscCall(PetscPrintf(ctx->comm, "n: e: %10.3e i: %10.3e %10.3e\n", ctx->n[0],ctx->n[1],ctx->num_species>2 ? ctx->n[2] : 0)); 1496 PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz)); 1497 PetscCall(PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",0,ctx->radius[0],ctx->numAMRRefine[0])); 1498 for (ii=1;ii<ctx->num_grids;ii++) PetscCall(PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ",ii,ctx->radius[ii],ctx->numAMRRefine[ii])); 1499 PetscCall(PetscPrintf(ctx->comm,"\n")); 1500 if (ctx->jacobian_field_major_order) { 1501 PetscCall(PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n")); 1502 } else { 1503 PetscCall(PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n")); 1504 } 1505 } 1506 PetscCall(DMDestroy(&dummy)); 1507 { 1508 PetscMPIInt rank; 1509 PetscCallMPI(MPI_Comm_rank(ctx->comm, &rank)); 1510 ctx->stage = 0; 1511 PetscCall(PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13])); /* 13 */ 1512 PetscCall(PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2])); /* 2 */ 1513 PetscCall(PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12])); /* 12 */ 1514 PetscCall(PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15])); /* 15 */ 1515 PetscCall(PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14])); /* 14 */ 1516 PetscCall(PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11])); /* 11 */ 1517 PetscCall(PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0])); /* 0 */ 1518 PetscCall(PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9])); /* 9 */ 1519 PetscCall(PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10])); /* 10 */ 1520 PetscCall(PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7])); /* 7 */ 1521 PetscCall(PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1])); /* 1 */ 1522 PetscCall(PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3])); /* 3 */ 1523 PetscCall(PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8])); /* 8 */ 1524 PetscCall(PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4])); /* 4 */ 1525 PetscCall(PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16])); /* 16 */ 1526 PetscCall(PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5])); /* 5 */ 1527 PetscCall(PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6])); /* 6 */ 1528 1529 if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */ 1530 PetscCall(PetscOptionsClearValue(NULL,"-snes_converged_reason")); 1531 PetscCall(PetscOptionsClearValue(NULL,"-ksp_converged_reason")); 1532 PetscCall(PetscOptionsClearValue(NULL,"-snes_monitor")); 1533 PetscCall(PetscOptionsClearValue(NULL,"-ksp_monitor")); 1534 PetscCall(PetscOptionsClearValue(NULL,"-ts_monitor")); 1535 PetscCall(PetscOptionsClearValue(NULL,"-ts_view")); 1536 PetscCall(PetscOptionsClearValue(NULL,"-ts_adapt_monitor")); 1537 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view")); 1538 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view")); 1539 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view")); 1540 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mass_view")); 1541 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view")); 1542 PetscCall(PetscOptionsClearValue(NULL,"-dm_landau_mat_view")); 1543 PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_converged_reason")); 1544 PetscCall(PetscOptionsClearValue(NULL,"-pc_bjkokkos_ksp_monitor")); 1545 PetscCall(PetscOptionsClearValue(NULL,"-")); 1546 PetscCall(PetscOptionsClearValue(NULL,"-info")); 1547 } 1548 } 1549 PetscFunctionReturn(0); 1550 } 1551 1552 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx) 1553 { 1554 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 1555 PetscQuadrature quad; 1556 const PetscReal *quadWeights; 1557 PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nf[LANDAU_MAX_GRIDS], ncellsTot=0; 1558 PetscTabulation *Tf; 1559 PetscDS prob; 1560 1561 PetscFunctionBegin; 1562 PetscCall(DMGetDS(ctx->plex[0], &prob)); // same DS for all grids 1563 PetscCall(PetscDSGetTabulation(prob, &Tf)); // Bf, &Df same for all grids 1564 /* DS, Tab and quad is same on all grids */ 1565 PetscCheck(ctx->plex[0],ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 1566 PetscCall(PetscFEGetQuadrature(ctx->fe[0], &quad)); 1567 PetscCall(PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights)); 1568 PetscCheck(Nq <= LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %" PetscInt_FMT " > LANDAU_MAX_NQ (%" PetscInt_FMT ")",Nq,LANDAU_MAX_NQ); 1569 /* setup each grid */ 1570 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 1571 PetscInt cStart, cEnd; 1572 PetscCheck(ctx->plex[grid] != NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created"); 1573 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1574 numCells[grid] = cEnd - cStart; // grids can have different topology 1575 PetscCall(DMGetLocalSection(ctx->plex[grid], §ion[grid])); 1576 PetscCall(DMGetGlobalSection(ctx->plex[grid], &globsection[grid])); 1577 PetscCall(PetscSectionGetNumFields(section[grid], &Nf[grid])); 1578 ncellsTot += numCells[grid]; 1579 } 1580 #define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES) 1581 /* create GPU assembly data */ 1582 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 1583 PetscContainer container; 1584 PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat; 1585 pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE]; 1586 P4estVertexMaps *maps; 1587 const PetscInt *plex_batch=NULL,Nb=Nq; // tensor elements; 1588 LandauIdx *coo_elem_offsets=NULL, *coo_elem_fullNb=NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = NULL; 1589 /* create GPU asssembly data */ 1590 PetscCall(PetscInfo(ctx->plex[0], "Make GPU maps %d\n",1)); 1591 PetscCall(PetscLogEventBegin(ctx->events[2],0,0,0,0)); 1592 PetscCall(PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps)); 1593 1594 if (ctx->coo_assembly) { // setup COO assembly -- put COO metadata directly in ctx->SData_d 1595 PetscCall(PetscMalloc3(ncellsTot+1,&coo_elem_offsets,ncellsTot,&coo_elem_fullNb,ncellsTot, &coo_elem_point_offsets)); // array of integer pointers 1596 coo_elem_offsets[0] = 0; // finish later 1597 PetscCall(PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n",ncellsTot)); 1598 ctx->SData_d.coo_n_cellsTot = ncellsTot; 1599 ctx->SData_d.coo_elem_offsets = (void*)coo_elem_offsets; 1600 ctx->SData_d.coo_elem_fullNb = (void*)coo_elem_fullNb; 1601 ctx->SData_d.coo_elem_point_offsets = (void*)coo_elem_point_offsets; 1602 } else { 1603 ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL; 1604 ctx->SData_d.coo_elem_point_offsets = NULL; 1605 ctx->SData_d.coo_n_cellsTot = 0; 1606 } 1607 1608 ctx->SData_d.coo_max_fullnb = 0; 1609 for (PetscInt grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1610 PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc*Nq; 1611 if (grid_batch_is_inv[grid]) { 1612 PetscCall(ISGetIndices(grid_batch_is_inv[grid], &plex_batch)); 1613 } 1614 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1615 // make maps 1616 maps[grid].d_self = NULL; 1617 maps[grid].num_elements = numCells[grid]; 1618 maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q 1619 maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2 1620 maps[grid].num_reduced = 0; 1621 maps[grid].deviceType = ctx->deviceType; 1622 maps[grid].numgrids = ctx->num_grids; 1623 // count reduced and get 1624 PetscCall(PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx)); 1625 for (int ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) { 1626 if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx+1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add 1627 for (int fieldA=0;fieldA<Nf[grid];fieldA++) { 1628 int fullNb = 0; 1629 for (int q = 0; q < Nb; ++q) { 1630 PetscInt numindices,*indices; 1631 PetscScalar *valuesOrig = elMat = elemMatrix; 1632 PetscCall(PetscArrayzero(elMat, totDim*totDim)); 1633 elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1; 1634 PetscCall(DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat)); 1635 for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal 1636 if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) { 1637 // found it 1638 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0 1639 if (plex_batch) { 1640 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]]; 1641 } else { 1642 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; 1643 } 1644 fullNb++; 1645 } else { //found a constraint 1646 int jj = 0; 1647 PetscReal sum = 0; 1648 const PetscInt ff = f; 1649 maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1 1650 1651 do { // constraints are continuous in Plex - exploit that here 1652 int ii; // get 'scale' 1653 for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value 1654 if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not 1655 pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]); 1656 } 1657 } 1658 sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic 1659 // get 'gid' 1660 if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps 1661 else { 1662 if (plex_batch) { 1663 pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]]; 1664 } else { 1665 pointMaps[maps[grid].num_reduced][jj].gid = indices[f]; 1666 } 1667 fullNb++; 1668 } 1669 } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end 1670 while (jj < maps[grid].num_face) { 1671 pointMaps[maps[grid].num_reduced][jj].scale = 0; 1672 pointMaps[maps[grid].num_reduced][jj].gid = -1; 1673 jj++; 1674 } 1675 if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug 1676 int d,f; 1677 PetscReal tmp = 0; 1678 PetscCall(PetscPrintf(PETSC_COMM_SELF,"\t\t%" PetscInt_FMT ".%" PetscInt_FMT ".%" PetscInt_FMT ") ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%" PetscInt_FMT ")\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face)); 1679 for (d = 0, tmp = 0; d < numindices; ++d) { 1680 if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) PetscCall(PetscPrintf(PETSC_COMM_WORLD,"%3" PetscInt_FMT ") %3" PetscInt_FMT ": ",d,indices[d])); 1681 for (f = 0; f < numindices; ++f) { 1682 tmp += PetscRealPart(elMat[d*numindices + f]); 1683 } 1684 if (tmp!=0) PetscCall(PetscPrintf(ctx->comm," | %22.16e\n",tmp)); 1685 } 1686 } 1687 maps[grid].num_reduced++; 1688 PetscCheck(maps[grid].num_reduced<MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %d",maps[grid].num_reduced,MAP_BF_SIZE); 1689 } 1690 break; 1691 } 1692 } 1693 // cleanup 1694 PetscCall(DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat)); 1695 if (elMat != valuesOrig) PetscCall(DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat)); 1696 } 1697 if (ctx->coo_assembly) { // setup COO assembly 1698 coo_elem_offsets[glb_elem_idx+1] += fullNb*fullNb; // one species block, adds a block for each species, on this element in this grid 1699 if (fieldA==0) { // cache full Nb for this element, on this grid per species 1700 coo_elem_fullNb[glb_elem_idx] = fullNb; 1701 if (fullNb>ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb; 1702 } else PetscCheck(coo_elem_fullNb[glb_elem_idx] == fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "full element size change with species %" PetscInt_FMT " %" PetscInt_FMT,coo_elem_fullNb[glb_elem_idx],fullNb); 1703 } 1704 } // field 1705 } // cell 1706 // allocate and copy point data maps[grid].gIdx[eidx][field][q] 1707 PetscCall(PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps)); 1708 for (int ej = 0; ej < maps[grid].num_reduced; ++ej) { 1709 for (int q = 0; q < maps[grid].num_face; ++q) { 1710 maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale; 1711 maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid; 1712 } 1713 } 1714 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1715 if (ctx->deviceType == LANDAU_KOKKOS) { 1716 PetscCall(LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid)); // imples Kokkos does 1717 } // else could be CUDA 1718 #endif 1719 #if defined(PETSC_HAVE_CUDA) 1720 if (ctx->deviceType == LANDAU_CUDA) { 1721 PetscCall(LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid)); 1722 } 1723 #endif 1724 if (plex_batch) { 1725 PetscCall(ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch)); 1726 PetscCall(ISDestroy(&grid_batch_is_inv[grid])); // we are done with this 1727 } 1728 } /* grids */ 1729 // finish COO 1730 if (ctx->coo_assembly) { // setup COO assembly 1731 PetscInt *oor, *ooc; 1732 ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot]*ctx->batch_sz; 1733 PetscCall(PetscMalloc2(ctx->SData_d.coo_size,&oor,ctx->SData_d.coo_size,&ooc)); 1734 for (int i=0;i<ctx->SData_d.coo_size;i++) oor[i] = ooc[i] = -1; 1735 // get 1736 for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1737 for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) { 1738 const int fullNb = coo_elem_fullNb[glb_elem_idx]; 1739 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps, They should be the same but this is just for COO storage 1740 coo_elem_point_offsets[glb_elem_idx][0] = 0; 1741 for (int f=0, cnt2=0;f<Nb;f++) { 1742 int idx = Idxs[f]; 1743 coo_elem_point_offsets[glb_elem_idx][f+1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last 1744 if (idx >= 0) { 1745 cnt2++; 1746 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc 1747 } else { 1748 idx = -idx - 1; 1749 for (int q = 0 ; q < maps[grid].num_face; q++) { 1750 if (maps[grid].c_maps[idx][q].gid < 0) break; 1751 cnt2++; 1752 coo_elem_point_offsets[glb_elem_idx][f+1]++; // inc 1753 } 1754 } 1755 PetscCheck(cnt2 <= fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "wrong count %d < %d",fullNb,cnt2); 1756 } 1757 PetscCheck(coo_elem_point_offsets[glb_elem_idx][Nb]==fullNb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "coo_elem_point_offsets size %d != fullNb=%d",coo_elem_point_offsets[glb_elem_idx][Nb],fullNb); 1758 } 1759 } 1760 // set 1761 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 1762 for (int grid=0,glb_elem_idx=0;grid<ctx->num_grids;grid++) { 1763 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 1764 for (int ej = 0 ; ej < numCells[grid] ; ++ej, glb_elem_idx++) { 1765 const int fullNb = coo_elem_fullNb[glb_elem_idx],fullNb2=fullNb*fullNb; 1766 // set (i,j) 1767 for (int fieldA=0;fieldA<Nf[grid];fieldA++) { 1768 const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0]; 1769 int rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; 1770 for (int f = 0; f < Nb; ++f) { 1771 const int nr = coo_elem_point_offsets[glb_elem_idx][f+1] - coo_elem_point_offsets[glb_elem_idx][f]; 1772 if (nr==1) rows[0] = Idxs[f]; 1773 else { 1774 const int idx = -Idxs[f] - 1; 1775 for (int q = 0; q < nr; q++) { 1776 rows[q] = maps[grid].c_maps[idx][q].gid; 1777 } 1778 } 1779 for (int g = 0; g < Nb; ++g) { 1780 const int nc = coo_elem_point_offsets[glb_elem_idx][g+1] - coo_elem_point_offsets[glb_elem_idx][g]; 1781 if (nc==1) cols[0] = Idxs[g]; 1782 else { 1783 const int idx = -Idxs[g] - 1; 1784 for (int q = 0; q < nc; q++) { 1785 cols[q] = maps[grid].c_maps[idx][q].gid; 1786 } 1787 } 1788 const int idx0 = b_id*coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA*fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g]; 1789 for (int q = 0, idx = idx0; q < nr; q++) { 1790 for (int d = 0; d < nc; d++, idx++) { 1791 oor[idx] = rows[q] + moffset; 1792 ooc[idx] = cols[d] + moffset; 1793 } 1794 } 1795 } 1796 } 1797 } 1798 } // cell 1799 } // grid 1800 } // batch 1801 PetscCall(MatSetPreallocationCOO(ctx->J,ctx->SData_d.coo_size,oor,ooc)); 1802 PetscCall(PetscFree2(oor,ooc)); 1803 } 1804 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 1805 PetscCall(PetscContainerSetPointer(container, (void *)maps)); 1806 PetscCall(PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy)); 1807 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container)); 1808 PetscCall(PetscContainerDestroy(&container)); 1809 PetscCall(PetscLogEventEnd(ctx->events[2],0,0,0,0)); 1810 } // end GPU assembly 1811 { /* create static point data, Jacobian called first, only one vertex copy */ 1812 PetscReal *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a; 1813 PetscInt outer_ipidx, outer_ej,grid, nip_glb = 0; 1814 PetscFE fe; 1815 const PetscInt Nb = Nq; 1816 PetscCall(PetscLogEventBegin(ctx->events[7],0,0,0,0)); 1817 PetscCall(PetscInfo(ctx->plex[0], "Initialize static data\n")); 1818 for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid]; 1819 /* collect f data, first time is for Jacobian, but make mass now */ 1820 if (ctx->verbose > 0) { 1821 PetscInt ncells = 0, N; 1822 PetscCall(MatGetSize(ctx->J,&N,NULL)); 1823 for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid]; 1824 PetscCall(PetscPrintf(ctx->comm,"%" PetscInt_FMT ") %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n",0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N)); 1825 } 1826 PetscCall(PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a)); 1827 if (dim==3) { 1828 PetscCall(PetscMalloc1(nip_glb,&zz)); 1829 } 1830 if (ctx->use_energy_tensor_trick) { 1831 PetscCall(PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe)); 1832 PetscCall(PetscObjectSetName((PetscObject) fe, "energy")); 1833 } 1834 /* init each grids static data - no batch */ 1835 for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once) 1836 Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic 1837 PetscSection e_section; 1838 DM dmEnergy; 1839 PetscInt cStart, cEnd, ej; 1840 1841 PetscCall(DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd)); 1842 // prep energy trick, get v^2 / 2 vector 1843 if (ctx->use_energy_tensor_trick) { 1844 PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f}; 1845 Vec glob_v2; 1846 PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))}; 1847 1848 PetscCall(DMClone(ctx->plex[grid], &dmEnergy)); 1849 PetscCall(PetscObjectSetName((PetscObject) dmEnergy, "energy")); 1850 PetscCall(DMSetField(dmEnergy, 0, NULL, (PetscObject)fe)); 1851 PetscCall(DMCreateDS(dmEnergy)); 1852 PetscCall(DMGetSection(dmEnergy, &e_section)); 1853 PetscCall(DMGetGlobalVector(dmEnergy,&glob_v2)); 1854 PetscCall(PetscObjectSetName((PetscObject) glob_v2, "trick")); 1855 c2_0[0] = &data[0]; 1856 PetscCall(DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2)); 1857 PetscCall(DMGetLocalVector(dmEnergy, &v2_2)); 1858 PetscCall(VecZeroEntries(v2_2)); /* zero BCs so don't set */ 1859 PetscCall(DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1860 PetscCall(DMGlobalToLocalEnd (dmEnergy, glob_v2, INSERT_VALUES, v2_2)); 1861 PetscCall(DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view")); 1862 PetscCall(VecViewFromOptions(glob_v2,NULL, "-energy_vec_view")); 1863 PetscCall(DMRestoreGlobalVector(dmEnergy, &glob_v2)); 1864 } 1865 /* append part of the IP data for each grid */ 1866 for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) { 1867 PetscScalar *coefs = NULL; 1868 PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0); 1869 invJe = invJ_a + outer_ej*Nq*dim*dim; 1870 PetscCall(DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj)); 1871 if (ctx->use_energy_tensor_trick) { 1872 PetscCall(DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs)); 1873 } 1874 /* create static point data */ 1875 for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) { 1876 const PetscInt gidx = outer_ipidx; 1877 const PetscReal *invJ = &invJe[qj*dim*dim]; 1878 ww [gidx] = detJj[qj] * quadWeights[qj]; 1879 if (dim==2) ww [gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */ 1880 // get xx, yy, zz 1881 if (ctx->use_energy_tensor_trick) { 1882 double refSpaceDer[3],eGradPhi[3]; 1883 const PetscReal * const DD = Tf[0]->T[1]; 1884 const PetscReal *Dq = &DD[qj*Nb*dim]; 1885 for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0; 1886 for (int b = 0; b < Nb; ++b) { 1887 for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]); 1888 } 1889 xx[gidx] = 1e10; 1890 if (ctx->use_relativistic_corrections) { 1891 double dg2_c2 = 0; 1892 //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02; 1893 for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]); 1894 dg2_c2 *= (double)c02; 1895 if (dg2_c2 >= .999) { 1896 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1897 yy[gidx] = vj[qj * dim + 1]; 1898 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1899 PetscCall(PetscPrintf(ctx->comm,"Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, xx[gidx],yy[gidx],zz[gidx])); 1900 } else { 1901 PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2); 1902 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact; 1903 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0 1904 } 1905 } 1906 if (xx[gidx] == 1e10) { 1907 for (int d = 0; d < dim; ++d) { 1908 for (int e = 0 ; e < dim; ++e) { 1909 eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e]; 1910 } 1911 } 1912 xx[gidx] = eGradPhi[0]; 1913 yy[gidx] = eGradPhi[1]; 1914 if (dim==3) zz[gidx] = eGradPhi[2]; 1915 } 1916 } else { 1917 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1918 yy[gidx] = vj[qj * dim + 1]; 1919 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1920 } 1921 } /* q */ 1922 if (ctx->use_energy_tensor_trick) { 1923 PetscCall(DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs)); 1924 } 1925 } /* ej */ 1926 if (ctx->use_energy_tensor_trick) { 1927 PetscCall(DMRestoreLocalVector(dmEnergy, &v2_2)); 1928 PetscCall(DMDestroy(&dmEnergy)); 1929 } 1930 } /* grid */ 1931 if (ctx->use_energy_tensor_trick) { 1932 PetscCall(PetscFEDestroy(&fe)); 1933 } 1934 /* cache static data */ 1935 if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) { 1936 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS) 1937 PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 1938 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1939 for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) { 1940 invMass[ii] = ctx->m_0/ctx->masses[ii]; 1941 nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii]; 1942 nu_beta[ii] = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3); 1943 } 1944 } 1945 if (ctx->deviceType == LANDAU_CUDA) { 1946 #if defined(PETSC_HAVE_CUDA) 1947 PetscCall(LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1948 nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d)); 1949 #else 1950 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type cuda not built"); 1951 #endif 1952 } else if (ctx->deviceType == LANDAU_KOKKOS) { 1953 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1954 PetscCall(LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1955 nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d)); 1956 #else 1957 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type kokkos not built"); 1958 #endif 1959 } 1960 #endif 1961 /* free */ 1962 PetscCall(PetscFree4(ww,xx,yy,invJ_a)); 1963 if (dim==3) { 1964 PetscCall(PetscFree(zz)); 1965 } 1966 } else { /* CPU version, just copy in, only use part */ 1967 ctx->SData_d.w = (void*)ww; 1968 ctx->SData_d.x = (void*)xx; 1969 ctx->SData_d.y = (void*)yy; 1970 ctx->SData_d.z = (void*)zz; 1971 ctx->SData_d.invJ = (void*)invJ_a; 1972 } 1973 PetscCall(PetscLogEventEnd(ctx->events[7],0,0,0,0)); 1974 } // initialize 1975 PetscFunctionReturn(0); 1976 } 1977 1978 /* < v, u > */ 1979 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1980 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1981 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1982 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1983 { 1984 g0[0] = 1.; 1985 } 1986 1987 /* < v, u > */ 1988 static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1989 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1990 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1991 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 1992 { 1993 static double ttt = 1; 1994 g0[0] = ttt++; 1995 } 1996 1997 /* < v, u > */ 1998 static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1999 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2000 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2001 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[]) 2002 { 2003 g0[0] = 2.*PETSC_PI*x[0]; 2004 } 2005 2006 static PetscErrorCode MatrixNfDestroy(void *ptr) 2007 { 2008 PetscInt *nf = (PetscInt *)ptr; 2009 PetscFunctionBegin; 2010 PetscCall(PetscFree(nf)); 2011 PetscFunctionReturn(0); 2012 } 2013 2014 static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx) 2015 { 2016 PetscInt *idxs=NULL; 2017 Mat subM[LANDAU_MAX_GRIDS]; 2018 2019 PetscFunctionBegin; 2020 if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 2021 PetscFunctionReturn(0); 2022 } 2023 // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' 2024 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2025 PetscCall(PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs)); 2026 } 2027 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2028 const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid]; 2029 Mat gMat; 2030 DM massDM; 2031 PetscDS prob; 2032 Vec tvec; 2033 // get "mass" matrix for reordering 2034 PetscCall(DMClone(ctx->plex[grid], &massDM)); 2035 PetscCall(DMCopyFields(ctx->plex[grid], massDM)); 2036 PetscCall(DMCreateDS(massDM)); 2037 PetscCall(DMGetDS(massDM, &prob)); 2038 for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) { 2039 PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL)); 2040 } 2041 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 2042 PetscCall(DMSetFromOptions(massDM)); 2043 PetscCall(DMCreateMatrix(massDM, &gMat)); 2044 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 2045 PetscCall(MatSetOption(gMat,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2046 PetscCall(MatSetOption(gMat,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 2047 PetscCall(DMCreateLocalVector(ctx->plex[grid],&tvec)); 2048 PetscCall(DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx)); 2049 PetscCall(MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view")); 2050 PetscCall(DMDestroy(&massDM)); 2051 PetscCall(VecDestroy(&tvec)); 2052 subM[grid] = gMat; 2053 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2054 MatOrderingType rtype = MATORDERINGRCM; 2055 IS isrow,isicol; 2056 PetscCall(MatGetOrdering(gMat,rtype,&isrow,&isicol)); 2057 PetscCall(ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid])); 2058 PetscCall(ISGetIndices(isrow, &values)); 2059 for (PetscInt b_id=0 ; b_id < ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid 2060 #if !defined(LANDAU_SPECIES_MAJOR) 2061 PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N; 2062 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 2063 #else 2064 PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n; 2065 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 2066 #endif 2067 } 2068 PetscCall(ISRestoreIndices(isrow, &values)); 2069 PetscCall(ISDestroy(&isrow)); 2070 PetscCall(ISDestroy(&isicol)); 2071 } 2072 } 2073 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2074 PetscCall(ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is)); 2075 } 2076 // get a block matrix 2077 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2078 Mat B = subM[grid]; 2079 PetscInt nloc, nzl, colbuf[1024], row; 2080 PetscCall(MatGetSize(B, &nloc, NULL)); 2081 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2082 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2083 const PetscInt *cols; 2084 const PetscScalar *vals; 2085 for (int i=0 ; i<nloc ; i++) { 2086 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 2087 PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 2088 for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset; 2089 row = i + moffset; 2090 PetscCall(MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES)); 2091 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 2092 } 2093 } 2094 } 2095 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2096 PetscCall(MatDestroy(&subM[grid])); 2097 } 2098 PetscCall(MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY)); 2099 PetscCall(MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY)); 2100 2101 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2102 Mat mat_block_order; 2103 PetscCall(MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order)); // use MatPermute 2104 PetscCall(MatViewFromOptions(mat_block_order, NULL, "-dm_landau_field_major_mat_view")); 2105 PetscCall(MatDestroy(&ctx->J)); 2106 ctx->J = mat_block_order; 2107 // override ops to make KSP work in field major space 2108 ctx->seqaij_mult = mat_block_order->ops->mult; 2109 mat_block_order->ops->mult = LandauMatMult; 2110 mat_block_order->ops->multadd = LandauMatMultAdd; 2111 ctx->seqaij_solve = NULL; 2112 ctx->seqaij_getdiagonal = mat_block_order->ops->getdiagonal; 2113 mat_block_order->ops->getdiagonal = LandauMatGetDiagonal; 2114 ctx->seqaij_multtranspose = mat_block_order->ops->multtranspose; 2115 mat_block_order->ops->multtranspose = LandauMatMultTranspose; 2116 PetscCall(VecDuplicate(X,&ctx->work_vec)); 2117 PetscCall(VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch)); 2118 } 2119 2120 PetscFunctionReturn(0); 2121 } 2122 2123 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat); 2124 /*@C 2125 DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh 2126 2127 Collective on comm 2128 2129 Input Parameters: 2130 + comm - The MPI communicator 2131 . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver) 2132 - prefix - prefix for options (not tested) 2133 2134 Output Parameter: 2135 . pack - The DM object representing the mesh 2136 + X - A vector (user destroys) 2137 - J - Optional matrix (object destroys) 2138 2139 Level: beginner 2140 2141 .keywords: mesh 2142 .seealso: DMPlexCreate(), DMPlexLandauDestroyVelocitySpace() 2143 @*/ 2144 PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack) 2145 { 2146 LandauCtx *ctx; 2147 Vec Xsub[LANDAU_MAX_GRIDS]; 2148 IS grid_batch_is_inv[LANDAU_MAX_GRIDS]; 2149 2150 PetscFunctionBegin; 2151 PetscCheckFalse(dim!=2 && dim!=3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported"); 2152 PetscCheck(LANDAU_DIM == dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " != LANDAU_DIM %d",dim,LANDAU_DIM); 2153 PetscCall(PetscNew(&ctx)); 2154 ctx->comm = comm; /* used for diagnostics and global errors */ 2155 /* process options */ 2156 PetscCall(ProcessOptions(ctx,prefix)); 2157 if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE; 2158 /* Create Mesh */ 2159 PetscCall(DMCompositeCreate(PETSC_COMM_SELF,pack)); 2160 PetscCall(PetscLogEventBegin(ctx->events[13],0,0,0,0)); 2161 PetscCall(PetscLogEventBegin(ctx->events[15],0,0,0,0)); 2162 PetscCall(LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack)); // creates grids (Forest of AMR) 2163 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2164 /* create FEM */ 2165 PetscCall(SetupDS(ctx->plex[grid],dim,grid,ctx)); 2166 /* set initial state */ 2167 PetscCall(DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid])); 2168 PetscCall(PetscObjectSetName((PetscObject) Xsub[grid], "u_orig")); 2169 /* initial static refinement, no solve */ 2170 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx)); 2171 /* forest refinement - forest goes in (if forest), plex comes out */ 2172 if (ctx->use_p4est) { 2173 DM plex; 2174 PetscCall(adapt(grid,ctx,&Xsub[grid])); // forest goes in, plex comes out 2175 PetscCall(DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view")); // need to differentiate - todo 2176 PetscCall(VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view")); 2177 // convert to plex, all done with this level 2178 PetscCall(DMConvert(ctx->plex[grid], DMPLEX, &plex)); 2179 PetscCall(DMDestroy(&ctx->plex[grid])); 2180 ctx->plex[grid] = plex; 2181 } 2182 #if !defined(LANDAU_SPECIES_MAJOR) 2183 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2184 #else 2185 for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid 2186 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2187 } 2188 #endif 2189 PetscCall(DMSetApplicationContext(ctx->plex[grid], ctx)); 2190 } 2191 #if !defined(LANDAU_SPECIES_MAJOR) 2192 // stack the batched DMs, could do it all here!!! b_id=0 2193 for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) { 2194 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2195 PetscCall(DMCompositeAddDM(*pack,ctx->plex[grid])); 2196 } 2197 } 2198 #endif 2199 // create ctx->mat_offset 2200 ctx->mat_offset[0] = 0; 2201 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2202 PetscInt n; 2203 PetscCall(VecGetLocalSize(Xsub[grid],&n)); 2204 ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n; 2205 } 2206 // creat DM & Jac 2207 PetscCall(DMSetApplicationContext(*pack, ctx)); 2208 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 2209 PetscCall(DMSetFromOptions(*pack)); 2210 PetscCall(DMCreateMatrix(*pack, &ctx->J)); 2211 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 2212 PetscCall(MatSetOption(ctx->J,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2213 PetscCall(MatSetOption(ctx->J,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 2214 PetscCall(PetscObjectSetName((PetscObject)ctx->J, "Jac")); 2215 // construct initial conditions in X 2216 PetscCall(DMCreateGlobalVector(*pack,X)); 2217 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2218 PetscInt n; 2219 PetscCall(VecGetLocalSize(Xsub[grid],&n)); 2220 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2221 PetscScalar const *values; 2222 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2223 PetscCall(LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx)); 2224 PetscCall(VecGetArrayRead(Xsub[grid],&values)); 2225 for (int i=0, idx = moffset; i<n; i++, idx++) { 2226 PetscCall(VecSetValue(*X,idx,values[i],INSERT_VALUES)); 2227 } 2228 PetscCall(VecRestoreArrayRead(Xsub[grid],&values)); 2229 } 2230 } 2231 // cleanup 2232 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2233 PetscCall(VecDestroy(&Xsub[grid])); 2234 } 2235 /* check for correct matrix type */ 2236 if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */ 2237 PetscBool flg; 2238 if (ctx->deviceType == LANDAU_CUDA) { 2239 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"")); 2240 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'"); 2241 } else if (ctx->deviceType == LANDAU_KOKKOS) { 2242 PetscCall(PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"")); 2243 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 2244 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2245 #else 2246 PetscCheck(flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'"); 2247 #endif 2248 } 2249 } 2250 PetscCall(PetscLogEventEnd(ctx->events[15],0,0,0,0)); 2251 // create field major ordering 2252 2253 ctx->work_vec = NULL; 2254 ctx->plex_batch = NULL; 2255 ctx->batch_is = NULL; 2256 for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL; 2257 PetscCall(PetscLogEventBegin(ctx->events[12],0,0,0,0)); 2258 PetscCall(LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx)); 2259 PetscCall(PetscLogEventEnd(ctx->events[12],0,0,0,0)); 2260 2261 // create AMR GPU assembly maps and static GPU data 2262 PetscCall(CreateStaticGPUData(dim,grid_batch_is_inv,ctx)); 2263 2264 PetscCall(PetscLogEventEnd(ctx->events[13],0,0,0,0)); 2265 2266 // create mass matrix 2267 PetscCall(DMPlexLandauCreateMassMatrix(*pack, NULL)); 2268 2269 if (J) *J = ctx->J; 2270 2271 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 2272 PetscContainer container; 2273 // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order 2274 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2275 PetscCall(PetscContainerSetPointer(container, (void *)ctx)); 2276 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container)); 2277 PetscCall(PetscContainerDestroy(&container)); 2278 // batch solvers need to map -- can batch solvers work 2279 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2280 PetscCall(PetscContainerSetPointer(container, (void *)ctx->plex_batch)); 2281 PetscCall(PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container)); 2282 PetscCall(PetscContainerDestroy(&container)); 2283 } 2284 // for batch solvers 2285 { 2286 PetscContainer container; 2287 PetscInt *pNf; 2288 PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &container)); 2289 PetscCall(PetscMalloc1(sizeof(*pNf), &pNf)); 2290 *pNf = ctx->batch_sz; 2291 PetscCall(PetscContainerSetPointer(container, (void *)pNf)); 2292 PetscCall(PetscContainerSetUserDestroy(container, MatrixNfDestroy)); 2293 PetscCall(PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container)); 2294 PetscCall(PetscContainerDestroy(&container)); 2295 } 2296 2297 PetscFunctionReturn(0); 2298 } 2299 2300 /*@ 2301 DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh 2302 2303 Collective on dm 2304 2305 Input/Output Parameters: 2306 . dm - the dm to destroy 2307 2308 Level: beginner 2309 2310 .keywords: mesh 2311 .seealso: DMPlexLandauCreateVelocitySpace() 2312 @*/ 2313 PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm) 2314 { 2315 LandauCtx *ctx; 2316 PetscFunctionBegin; 2317 PetscCall(DMGetApplicationContext(*dm, &ctx)); 2318 PetscCall(MatDestroy(&ctx->M)); 2319 PetscCall(MatDestroy(&ctx->J)); 2320 for (PetscInt ii=0;ii<ctx->num_species;ii++) PetscCall(PetscFEDestroy(&ctx->fe[ii])); 2321 PetscCall(ISDestroy(&ctx->batch_is)); 2322 PetscCall(VecDestroy(&ctx->work_vec)); 2323 PetscCall(VecScatterDestroy(&ctx->plex_batch)); 2324 if (ctx->deviceType == LANDAU_CUDA) { 2325 #if defined(PETSC_HAVE_CUDA) 2326 PetscCall(LandauCUDAStaticDataClear(&ctx->SData_d)); 2327 #else 2328 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda"); 2329 #endif 2330 } else if (ctx->deviceType == LANDAU_KOKKOS) { 2331 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 2332 PetscCall(LandauKokkosStaticDataClear(&ctx->SData_d)); 2333 #else 2334 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos"); 2335 #endif 2336 } else { 2337 if (ctx->SData_d.x) { /* in a CPU run */ 2338 PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w; 2339 LandauIdx *coo_elem_offsets = (LandauIdx*)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx*)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ+1] = (LandauIdx (*)[LANDAU_MAX_NQ+1])ctx->SData_d.coo_elem_point_offsets; 2340 PetscCall(PetscFree4(ww,xx,yy,invJ)); 2341 if (zz) { 2342 PetscCall(PetscFree(zz)); 2343 } 2344 if (coo_elem_offsets) { 2345 PetscCall(PetscFree3(coo_elem_offsets,coo_elem_fullNb,coo_elem_point_offsets)); // could be NULL 2346 } 2347 } 2348 } 2349 2350 if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings 2351 PetscCall(PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE])); 2352 PetscCall(PetscPrintf(ctx->comm, "2: Solve: %10.3e with %" PetscInt_FMT " threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz)); 2353 PetscCall(PetscPrintf(ctx->comm, "3: Landau: %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL])); 2354 PetscCall(PetscPrintf(ctx->comm, "Landau Jacobian %" PetscInt_FMT " 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN])); 2355 PetscCall(PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n",ctx->times[LANDAU_OPERATOR])); 2356 PetscCall(PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n",ctx->times[LANDAU_MASS])); 2357 PetscCall(PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_F_DF])); 2358 PetscCall(PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_KERNEL])); 2359 PetscCall(PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n",ctx->times[KSP_FACTOR])); 2360 PetscCall(PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n",ctx->times[KSP_SOLVE])); 2361 } 2362 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2363 PetscCall(DMDestroy(&ctx->plex[grid])); 2364 } 2365 PetscFree(ctx); 2366 PetscCall(DMDestroy(dm)); 2367 PetscFunctionReturn(0); 2368 } 2369 2370 /* < v, ru > */ 2371 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2372 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2373 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2374 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2375 { 2376 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2377 f0[0] = u[ii]; 2378 } 2379 2380 /* < v, ru > */ 2381 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2382 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2383 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2384 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2385 { 2386 PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]); 2387 f0[0] = x[jj]*u[ii]; /* x momentum */ 2388 } 2389 2390 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2391 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2392 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2393 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2394 { 2395 PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]); 2396 double tmp1 = 0.; 2397 for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2398 f0[0] = tmp1*u[ii]; 2399 } 2400 2401 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) 2402 { 2403 const PetscReal *c2_0_arr = ((PetscReal*)actx); 2404 const PetscReal c02 = c2_0_arr[0]; 2405 2406 PetscFunctionBegin; 2407 for (int s = 0 ; s < Nf ; s++) { 2408 PetscReal tmp1 = 0.; 2409 for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2410 #if defined(PETSC_USE_DEBUG) 2411 u[s] = PetscSqrtReal(1. + tmp1/c02);// u[0] = PetscSqrtReal(1. + xx); 2412 #else 2413 { 2414 PetscReal xx = tmp1/c02; 2415 u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.) 2416 } 2417 #endif 2418 } 2419 PetscFunctionReturn(0); 2420 } 2421 2422 /* < v, ru > */ 2423 static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2424 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2425 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2426 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2427 { 2428 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2429 f0[0] = 2.*PETSC_PI*x[0]*u[ii]; 2430 } 2431 2432 /* < v, ru > */ 2433 static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2434 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2435 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2436 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2437 { 2438 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2439 f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii]; 2440 } 2441 2442 static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2443 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2444 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2445 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2446 { 2447 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2448 f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii]; 2449 } 2450 2451 /*@ 2452 DMPlexLandauPrintNorms - collects moments and prints them 2453 2454 Collective on dm 2455 2456 Input Parameters: 2457 + X - the state 2458 - stepi - current step to print 2459 2460 Level: beginner 2461 2462 .keywords: mesh 2463 .seealso: DMPlexLandauCreateVelocitySpace() 2464 @*/ 2465 PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi) 2466 { 2467 LandauCtx *ctx; 2468 PetscDS prob; 2469 DM pack; 2470 PetscInt cStart, cEnd, dim, ii, i0, nDMs; 2471 PetscScalar xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES]; 2472 PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES]; 2473 Vec *globXArray; 2474 2475 PetscFunctionBegin; 2476 PetscCall(VecGetDM(X, &pack)); 2477 PetscCheck(pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM"); 2478 PetscCall(DMGetDimension(pack, &dim)); 2479 PetscCheck(dim == 2 || dim == 3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %" PetscInt_FMT " not in [2,3]",dim); 2480 PetscCall(DMGetApplicationContext(pack, &ctx)); 2481 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2482 /* print momentum and energy */ 2483 PetscCall(DMCompositeGetNumberDM(pack,&nDMs)); 2484 PetscCheck(nDMs == ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %" PetscInt_FMT " %" PetscInt_FMT,nDMs,ctx->num_grids*ctx->batch_sz); 2485 PetscCall(PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray)); 2486 PetscCall(DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray)); 2487 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 2488 Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ]; 2489 PetscCall(DMGetDS(ctx->plex[grid], &prob)); 2490 for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) { 2491 PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]}; 2492 PetscCall(PetscDSSetConstants(prob, 2, user)); 2493 if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */ 2494 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rden)); 2495 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2496 density[ii] = tt[0]*ctx->n_0*ctx->charges[ii]; 2497 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rmom)); 2498 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2499 zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2500 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_rv2)); 2501 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2502 energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii]; 2503 zmomentumtot += zmomentum[ii]; 2504 energytot += energy[ii]; 2505 densitytot += density[ii]; 2506 PetscCall(PetscPrintf(ctx->comm, "%3D) species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]))); 2507 } else { /* 2/3Xloc + 3V */ 2508 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_den)); 2509 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2510 density[ii] = tt[0]*ctx->n_0*ctx->charges[ii]; 2511 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_mom)); 2512 user[1] = 0; 2513 PetscCall(PetscDSSetConstants(prob, 2, user)); 2514 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2515 xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2516 user[1] = 1; 2517 PetscCall(PetscDSSetConstants(prob, 2, user)); 2518 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2519 ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2520 user[1] = 2; 2521 PetscCall(PetscDSSetConstants(prob, 2, user)); 2522 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2523 zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii]; 2524 if (ctx->use_relativistic_corrections) { 2525 /* gamma * M * f */ 2526 if (ii==0 && grid==0) { // do all at once 2527 Vec Mf, globGamma, *globMfArray, *globGammaArray; 2528 PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f}; 2529 PetscReal *c2_0[1], data[1]; 2530 2531 PetscCall(VecDuplicate(X,&globGamma)); 2532 PetscCall(VecDuplicate(X,&Mf)); 2533 PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray)); 2534 PetscCall(PetscMalloc(sizeof(*globMfArray)*nDMs, &globGammaArray)); 2535 /* M * f */ 2536 PetscCall(MatMult(ctx->M,X,Mf)); 2537 /* gamma */ 2538 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2539 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice, need to fix for batching 2540 Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ]; 2541 data[0] = PetscSqr(C_0(ctx->v_0)); 2542 c2_0[0] = &data[0]; 2543 PetscCall(DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1)); 2544 } 2545 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2546 /* gamma * Mf */ 2547 PetscCall(DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2548 PetscCall(DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2549 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice 2550 PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs; 2551 Vec Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2; 2552 // get each component 2553 PetscCall(VecGetSize(Mfsub,&N)); 2554 PetscCall(VecCreate(ctx->comm,&v1)); 2555 PetscCall(VecSetSizes(v1,PETSC_DECIDE,N/Nf)); 2556 PetscCall(VecCreate(ctx->comm,&v2)); 2557 PetscCall(VecSetSizes(v2,PETSC_DECIDE,N/Nf)); 2558 PetscCall(VecSetFromOptions(v1)); // ??? 2559 PetscCall(VecSetFromOptions(v2)); 2560 // get each component 2561 PetscCall(VecGetBlockSize(Gsub,&bs)); 2562 PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT " in Gsub",bs,Nf); 2563 PetscCall(VecGetBlockSize(Mfsub,&bs)); 2564 PetscCheck(bs == Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %" PetscInt_FMT " != num_species %" PetscInt_FMT,bs,Nf); 2565 for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) { 2566 PetscScalar val; 2567 PetscCall(VecStrideGather(Gsub,i,v1,INSERT_VALUES)); 2568 PetscCall(VecStrideGather(Mfsub,i,v2,INSERT_VALUES)); 2569 PetscCall(VecDot(v1,v2,&val)); 2570 energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix]; 2571 } 2572 PetscCall(VecDestroy(&v1)); 2573 PetscCall(VecDestroy(&v2)); 2574 } /* grids */ 2575 PetscCall(DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray)); 2576 PetscCall(DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray)); 2577 PetscCall(PetscFree(globGammaArray)); 2578 PetscCall(PetscFree(globMfArray)); 2579 PetscCall(VecDestroy(&globGamma)); 2580 PetscCall(VecDestroy(&Mf)); 2581 } 2582 } else { 2583 PetscCall(PetscDSSetObjective(prob, 0, &f0_s_v2)); 2584 PetscCall(DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx)); 2585 energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii]; 2586 } 2587 PetscCall(PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]))); 2588 xmomentumtot += xmomentum[ii]; 2589 ymomentumtot += ymomentum[ii]; 2590 zmomentumtot += zmomentum[ii]; 2591 energytot += energy[ii]; 2592 densitytot += density[ii]; 2593 } 2594 if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n"); 2595 } 2596 } 2597 PetscCall(DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray)); 2598 PetscCall(PetscFree(globXArray)); 2599 /* totals */ 2600 PetscCall(DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd)); 2601 if (ctx->num_species>1) { 2602 if (dim==2) { 2603 PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart)); 2604 } else { 2605 PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)",stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart)); 2606 } 2607 } else PetscCall(PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells",cEnd-cStart)); 2608 PetscCall(PetscPrintf(ctx->comm,"\n")); 2609 PetscFunctionReturn(0); 2610 } 2611 2612 /*@ 2613 DMPlexLandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not field major order of Jacobian) 2614 2615 Collective on pack 2616 2617 Input Parameters: 2618 . pack - the DM object 2619 2620 Output Parameters: 2621 . Amat - The mass matrix (optional), mass matrix is added to the DM context 2622 2623 Level: beginner 2624 2625 .keywords: mesh 2626 .seealso: DMPlexLandauCreateVelocitySpace() 2627 @*/ 2628 PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat) 2629 { 2630 DM mass_pack,massDM[LANDAU_MAX_GRIDS]; 2631 PetscDS prob; 2632 PetscInt ii,dim,N1=1,N2; 2633 LandauCtx *ctx; 2634 Mat packM,subM[LANDAU_MAX_GRIDS]; 2635 2636 PetscFunctionBegin; 2637 PetscValidHeaderSpecific(pack,DM_CLASSID,1); 2638 if (Amat) PetscValidPointer(Amat,2); 2639 PetscCall(DMGetApplicationContext(pack, &ctx)); 2640 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2641 PetscCall(PetscLogEventBegin(ctx->events[14],0,0,0,0)); 2642 PetscCall(DMGetDimension(pack, &dim)); 2643 PetscCall(DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack)); 2644 /* create pack mass matrix */ 2645 for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) { 2646 PetscCall(DMClone(ctx->plex[grid], &massDM[grid])); 2647 PetscCall(DMCopyFields(ctx->plex[grid], massDM[grid])); 2648 PetscCall(DMCreateDS(massDM[grid])); 2649 PetscCall(DMGetDS(massDM[grid], &prob)); 2650 for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) { 2651 if (dim==3) PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL)); 2652 else PetscCall(PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL)); 2653 } 2654 #if !defined(LANDAU_SPECIES_MAJOR) 2655 PetscCall(DMCompositeAddDM(mass_pack,massDM[grid])); 2656 #else 2657 for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid 2658 PetscCall(DMCompositeAddDM(mass_pack,massDM[grid])); 2659 } 2660 #endif 2661 PetscCall(DMCreateMatrix(massDM[grid], &subM[grid])); 2662 } 2663 #if !defined(LANDAU_SPECIES_MAJOR) 2664 // stack the batched DMs 2665 for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) { 2666 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2667 PetscCall(DMCompositeAddDM(mass_pack, massDM[grid])); 2668 } 2669 } 2670 #endif 2671 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only")); 2672 PetscCall(DMSetFromOptions(mass_pack)); 2673 PetscCall(DMCreateMatrix(mass_pack, &packM)); 2674 PetscCall(PetscOptionsInsertString(NULL,"-dm_preallocate_only false")); 2675 PetscCall(MatSetOption(packM,MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE)); 2676 PetscCall(MatSetOption(packM,MAT_IGNORE_ZERO_ENTRIES,PETSC_TRUE)); 2677 PetscCall(DMDestroy(&mass_pack)); 2678 /* make mass matrix for each block */ 2679 for (PetscInt grid=0;grid<ctx->num_grids;grid++) { 2680 Vec locX; 2681 DM plex = massDM[grid]; 2682 PetscCall(DMGetLocalVector(plex, &locX)); 2683 /* Mass matrix is independent of the input, so no need to fill locX */ 2684 PetscCall(DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx)); 2685 PetscCall(DMRestoreLocalVector(plex, &locX)); 2686 PetscCall(DMDestroy(&massDM[grid])); 2687 } 2688 PetscCall(MatGetSize(ctx->J, &N1, NULL)); 2689 PetscCall(MatGetSize(packM, &N2, NULL)); 2690 PetscCheck(N1 == N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %" PetscInt_FMT ", |Mass|=%" PetscInt_FMT,N1,N2); 2691 /* assemble block diagonals */ 2692 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2693 Mat B = subM[grid]; 2694 PetscInt nloc, nzl, colbuf[1024], row; 2695 PetscCall(MatGetSize(B, &nloc, NULL)); 2696 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 2697 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 2698 const PetscInt *cols; 2699 const PetscScalar *vals; 2700 for (int i=0 ; i<nloc ; i++) { 2701 PetscCall(MatGetRow(B,i,&nzl,&cols,&vals)); 2702 PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %" PetscInt_FMT,nzl); 2703 for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset; 2704 row = i + moffset; 2705 PetscCall(MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES)); 2706 PetscCall(MatRestoreRow(B,i,&nzl,&cols,&vals)); 2707 } 2708 } 2709 } 2710 // cleanup 2711 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 2712 PetscCall(MatDestroy(&subM[grid])); 2713 } 2714 PetscCall(MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY)); 2715 PetscCall(MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY)); 2716 PetscCall(PetscObjectSetName((PetscObject)packM, "mass")); 2717 PetscCall(MatViewFromOptions(packM,NULL,"-dm_landau_mass_view")); 2718 ctx->M = packM; 2719 if (Amat) *Amat = packM; 2720 PetscCall(PetscLogEventEnd(ctx->events[14],0,0,0,0)); 2721 PetscFunctionReturn(0); 2722 } 2723 2724 /*@ 2725 DMPlexLandauIFunction - TS residual calculation, confusingly this computes the Jacobian w/o mass 2726 2727 Collective on ts 2728 2729 Input Parameters: 2730 + TS - The time stepping context 2731 . time_dummy - current time (not used) 2732 - X - Current state 2733 + X_t - Time derivative of current state 2734 . actx - Landau context 2735 2736 Output Parameter: 2737 . F - The residual 2738 2739 Level: beginner 2740 2741 .keywords: mesh 2742 .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIJacobian() 2743 @*/ 2744 PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx) 2745 { 2746 LandauCtx *ctx=(LandauCtx*)actx; 2747 PetscInt dim; 2748 DM pack; 2749 #if defined(PETSC_HAVE_THREADSAFETY) 2750 double starttime, endtime; 2751 #endif 2752 PetscObjectState state; 2753 2754 PetscFunctionBegin; 2755 PetscCall(TSGetDM(ts,&pack)); 2756 PetscCall(DMGetApplicationContext(pack, &ctx)); 2757 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2758 if (ctx->stage) { 2759 PetscCall(PetscLogStagePush(ctx->stage)); 2760 } 2761 PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0)); 2762 PetscCall(PetscLogEventBegin(ctx->events[0],0,0,0,0)); 2763 #if defined(PETSC_HAVE_THREADSAFETY) 2764 starttime = MPI_Wtime(); 2765 #endif 2766 PetscCall(DMGetDimension(pack, &dim)); 2767 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2768 if (state != ctx->norm_state) { 2769 PetscCall(PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n",time_dummy, ctx->norm_state, state)); 2770 PetscCall(MatZeroEntries(ctx->J)); 2771 PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx)); 2772 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view")); 2773 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2774 ctx->norm_state = state; 2775 } else { 2776 PetscCall(PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n",state)); 2777 } 2778 /* mat vec for op */ 2779 PetscCall(MatMult(ctx->J,X,F)); /* C*f */ 2780 /* add time term */ 2781 if (X_t) { 2782 PetscCall(MatMultAdd(ctx->M,X_t,F,F)); 2783 } 2784 #if defined(PETSC_HAVE_THREADSAFETY) 2785 if (ctx->stage) { 2786 endtime = MPI_Wtime(); 2787 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2788 ctx->times[LANDAU_JACOBIAN] += (endtime - starttime); 2789 ctx->times[LANDAU_JACOBIAN_COUNT] += 1; 2790 } 2791 #endif 2792 PetscCall(PetscLogEventEnd(ctx->events[0],0,0,0,0)); 2793 PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0)); 2794 if (ctx->stage) { 2795 PetscCall(PetscLogStagePop()); 2796 #if defined(PETSC_HAVE_THREADSAFETY) 2797 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2798 #endif 2799 } 2800 PetscFunctionReturn(0); 2801 } 2802 2803 /*@ 2804 DMPlexLandauIJacobian - TS Jacobian construction, confusingly this adds mass 2805 2806 Collective on ts 2807 2808 Input Parameters: 2809 + TS - The time stepping context 2810 . time_dummy - current time (not used) 2811 - X - Current state 2812 + U_tdummy - Time derivative of current state (not used) 2813 . shift - shift for du/dt term 2814 - actx - Landau context 2815 2816 Output Parameter: 2817 . Amat - Jacobian 2818 + Pmat - same as Amat 2819 2820 Level: beginner 2821 2822 .keywords: mesh 2823 .seealso: DMPlexLandauCreateVelocitySpace(), DMPlexLandauIFunction() 2824 @*/ 2825 PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx) 2826 { 2827 LandauCtx *ctx=NULL; 2828 PetscInt dim; 2829 DM pack; 2830 #if defined(PETSC_HAVE_THREADSAFETY) 2831 double starttime, endtime; 2832 #endif 2833 PetscObjectState state; 2834 2835 PetscFunctionBegin; 2836 PetscCall(TSGetDM(ts,&pack)); 2837 PetscCall(DMGetApplicationContext(pack, &ctx)); 2838 PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); 2839 PetscCheckFalse(Amat!=Pmat || Amat!=ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J"); 2840 PetscCall(DMGetDimension(pack, &dim)); 2841 /* get collision Jacobian into A */ 2842 if (ctx->stage) { 2843 PetscCall(PetscLogStagePush(ctx->stage)); 2844 } 2845 PetscCall(PetscLogEventBegin(ctx->events[11],0,0,0,0)); 2846 PetscCall(PetscLogEventBegin(ctx->events[9],0,0,0,0)); 2847 #if defined(PETSC_HAVE_THREADSAFETY) 2848 starttime = MPI_Wtime(); 2849 #endif 2850 PetscCall(PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift)); 2851 PetscCheck(shift!=0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift"); 2852 PetscCall(PetscObjectStateGet((PetscObject)ctx->J,&state)); 2853 PetscCheck(state == ctx->norm_state,ctx->comm, PETSC_ERR_PLIB, "wrong state, %" PetscInt64_FMT " %" PetscInt64_FMT "",ctx->norm_state,state); 2854 if (!ctx->use_matrix_mass) { 2855 PetscCall(LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx)); 2856 PetscCall(MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view")); 2857 } else { /* add mass */ 2858 PetscCall(MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN)); 2859 } 2860 #if defined(PETSC_HAVE_THREADSAFETY) 2861 if (ctx->stage) { 2862 endtime = MPI_Wtime(); 2863 ctx->times[LANDAU_OPERATOR] += (endtime - starttime); 2864 ctx->times[LANDAU_MASS] += (endtime - starttime); 2865 } 2866 #endif 2867 PetscCall(PetscLogEventEnd(ctx->events[9],0,0,0,0)); 2868 PetscCall(PetscLogEventEnd(ctx->events[11],0,0,0,0)); 2869 if (ctx->stage) { 2870 PetscCall(PetscLogStagePop()); 2871 #if defined(PETSC_HAVE_THREADSAFETY) 2872 ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime); 2873 #endif 2874 } 2875 PetscFunctionReturn(0); 2876 } 2877