#include <../src/mat/impls/aij/seq/aij.h>
#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
#include <petsclandau.h>              /*I "petsclandau.h" I*/
#include <petscts.h>
#include <petscdmforest.h>
#include <petscdmcomposite.h>

/* Landau collision operator */

/* relativistic terms */
#if defined(PETSC_USE_REAL_SINGLE)
#define SPEED_OF_LIGHT 2.99792458e8F
#else
#define SPEED_OF_LIGHT 2.99792458e8
#endif
#define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */

#define PETSC_THREAD_SYNC
#include "land_tensors.h"

#if defined(PETSC_HAVE_OPENMP)
#include <omp.h>
#endif

/* vector padding not supported */
#define LANDAU_VL 1

static PetscErrorCode LandauMatMult(Mat A, Vec x, Vec y)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
    ierr = VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = (*ctx->seqaij_mult)(A,ctx->work_vec,y);CHKERRQ(ierr);
    ierr = VecCopy(y, ctx->work_vec);CHKERRQ(ierr);
    ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MatMult(A,x,y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
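/*
  A reading aid for the wrappers in this file (an interpretation of the code above, not a
  new code path): with S the ctx->plex_batch scatter into field-major (batch) ordering,
  LandauMatMult computes

      y = S^T * (A_batch * (S * x))

  i.e. the assembled matrix lives in the scattered ordering, so the input is permuted
  forward, the cached SeqAIJ kernel (ctx->seqaij_mult) is applied, and the result is
  permuted back. LandauMatMultTranspose and LandauMatGetDiagonal below follow the same
  pattern.
*/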
// Computes v3 = v2 + A * v1.
static PetscErrorCode LandauMatMultAdd(Mat A,Vec v1,Vec v2,Vec v3)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "MatMultAdd not expected for the Landau operator");
  ierr = LandauMatMult(A,v1,v3);CHKERRQ(ierr);
  ierr = VecAYPX(v3,1,v2);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauMatMultTranspose(Mat A, Vec x, Vec y)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
    ierr = VecScatterBegin(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(ctx->plex_batch,x,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = (*ctx->seqaij_multtranspose)(A,ctx->work_vec,y);CHKERRQ(ierr);
    ierr = VecCopy(y, ctx->work_vec);CHKERRQ(ierr);
    ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,y,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MatMultTranspose(A,x,y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauMatGetDiagonal(Mat A,Vec x)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject) A, "LandauCtx", (PetscObject *) &container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container, (void **) &ctx);CHKERRQ(ierr);
    ierr = (*ctx->seqaij_getdiagonal)(A,ctx->work_vec);CHKERRQ(ierr);
    ierr = VecScatterBegin(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd(ctx->plex_batch,ctx->work_vec,x,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  ierr = MatGetDiagonal(A, x);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
{
  P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
  PetscErrorCode  ierr;
  PetscFunctionBegin;
  // free device data
  if (maps[0].deviceType != LANDAU_CPU) {
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
    if (maps[0].deviceType == LANDAU_KOKKOS) {
      ierr = LandauKokkosDestroyMatMaps(maps, maps[0].numgrids);CHKERRQ(ierr); // Kokkos handles the device frees
    } // else could be CUDA
#elif defined(PETSC_HAVE_CUDA)
    if (maps[0].deviceType == LANDAU_CUDA) {
      ierr = LandauCUDADestroyMatMaps(maps, maps[0].numgrids);CHKERRQ(ierr);
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unknown maps->deviceType %D",maps->deviceType);
#endif
  }
  // free host data
  for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
    ierr = PetscFree(maps[grid].c_maps);CHKERRQ(ierr);
    ierr = PetscFree(maps[grid].gIdx);CHKERRQ(ierr);
  }
  ierr = PetscFree(maps);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal v2 = 0;
  PetscFunctionBegin;
  /* compute v^2 */
  for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
  /* kinetic energy v^2/2 */
  u[0] = v2/2;
  PetscFunctionReturn(0);
}

/* needs double */
static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  PetscReal *c2_0_arr = ((PetscReal*)actx);
  double    u2 = 0, c02 = (double)*c2_0_arr, xx;

  PetscFunctionBegin;
  /* compute u^2 */
  for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
  /* use gamma - 1 for conditioning; only derivatives of this field are taken */
  xx = u2/c02;
#if defined(PETSC_USE_DEBUG)
  u[0] = PetscSqrtReal(1. + xx);
#else
  u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the constant offset is harmless since only derivatives are used
#endif
  PetscFunctionReturn(0);
}
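/*
  Why the rationalized form above is better conditioned (a worked identity, not new code):
  with xx = u^2/c^2,

    gamma - 1 = sqrt(1 + xx) - 1
              = (sqrt(1+xx) - 1)(sqrt(1+xx) + 1) / (sqrt(1+xx) + 1)
              = xx / (sqrt(1+xx) + 1)

  For u << c the direct subtraction sqrt(1+xx) - 1 loses nearly all significant digits,
  while the rationalized form does not. The trailing -1 (a constant offset) is harmless
  because only derivatives of this field are used.
*/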
/*
  LandauFormJacobian_Internal - Evaluates the Jacobian matrix.

  Input Parameters:
+ a_X   - input vector
. dim   - dimension
. shift - shift for the mass matrix (0 means assemble the Jacobian)
- a_ctx - optional user-defined context

  Output Parameter:
. JacP - Jacobian matrix (filled, not created)
*/
static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
{
  LandauCtx         *ctx = (LandauCtx*)a_ctx;
  PetscErrorCode    ierr;
  PetscInt          numCells[LANDAU_MAX_GRIDS],Nq,Nb;
  PetscQuadrature   quad;
  PetscReal         Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
  PetscScalar       *cellClosure=NULL;
  const PetscScalar *xdata=NULL;
  PetscDS           prob;
  PetscContainer    container;
  P4estVertexMaps   *maps;
  Mat               subJ[LANDAU_MAX_GRIDS*LANDAU_MAX_BATCH_SZ];

  PetscFunctionBegin;
  PetscValidHeaderSpecific(a_X,VEC_CLASSID,1);
  PetscValidHeaderSpecific(JacP,MAT_CLASSID,2);
  PetscValidPointer(ctx,5);
  /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
  PetscCheckFalse(ctx->plex[0] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
  ierr = PetscLogEventBegin(ctx->events[10],0,0,0,0);CHKERRQ(ierr);
  ierr = DMGetDS(ctx->plex[0], &prob);CHKERRQ(ierr); // same DS for all grids
  ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr);
  if (container) {
    PetscCheck(ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"maps but no GPU assembly");
    ierr = PetscContainerGetPointer(container, (void **) &maps);CHKERRQ(ierr);
    PetscCheckFalse(!maps,ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
    for (PetscInt i=0;i<ctx->num_grids*ctx->batch_sz;i++) subJ[i] = NULL;
  } else {
    PetscCheck(!ctx->gpu_assembly,ctx->comm,PETSC_ERR_ARG_WRONG,"No maps but GPU assembly");
    for (PetscInt tid=0 ; tid<ctx->batch_sz ; tid++) {
      for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
        ierr = DMCreateMatrix(ctx->plex[grid], &subJ[ LAND_PACK_IDX(tid,grid) ]);CHKERRQ(ierr);
      }
    }
    maps = NULL;
  }
  // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
  ierr = PetscFEGetQuadrature(ctx->fe[0], &quad);CHKERRQ(ierr);
  ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);CHKERRQ(ierr); Nb = Nq;
  PetscCheckFalse(Nq > LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
  // get metadata for collecting dynamic data
  for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
    PetscInt cStart, cEnd;
    PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
    ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
    numCells[grid] = cEnd - cStart; // grids can have different topology
  }
  ierr = PetscLogEventEnd(ctx->events[10],0,0,0,0);CHKERRQ(ierr);
  if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
    DM pack;
    ierr = VecGetDM(a_X, &pack);CHKERRQ(ierr);
    PetscCheckFalse(!pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
    ierr = PetscLogEventBegin(ctx->events[1],0,0,0,0);CHKERRQ(ierr);
    ierr = MatZeroEntries(JacP);CHKERRQ(ierr);
    for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
      Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
      if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
    }
    if (!ctx->gpu_assembly) {
      Vec          *locXArray,*globXArray;
      PetscScalar  *cellClosure_it;
      PetscInt     cellClosure_sz=0,nDMs,Nf[LANDAU_MAX_GRIDS];
      PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
      for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
        ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
        ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
        ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
      }
      /* count cellClosure size */
      ierr = DMCompositeGetNumberDM(pack,&nDMs);CHKERRQ(ierr);
      for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
      ierr = PetscMalloc1(cellClosure_sz*ctx->batch_sz,&cellClosure);CHKERRQ(ierr);
      cellClosure_it = cellClosure;
      ierr = PetscMalloc(sizeof(*locXArray)*nDMs, &locXArray);CHKERRQ(ierr);
      ierr = PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);CHKERRQ(ierr);
      ierr = DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);CHKERRQ(ierr);
      ierr = DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray);CHKERRQ(ierr);
      for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP (once)
        for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
          Vec      locX = locXArray[ LAND_PACK_IDX(b_id,grid) ], globX = globXArray[ LAND_PACK_IDX(b_id,grid) ], locX2;
          PetscInt cStart, cEnd, ei;
          ierr = VecDuplicate(locX,&locX2);CHKERRQ(ierr);
          ierr = DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2);CHKERRQ(ierr);
          ierr = DMGlobalToLocalEnd  (ctx->plex[grid], globX, INSERT_VALUES, locX2);CHKERRQ(ierr);
          ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
          for (ei = cStart ; ei < cEnd; ++ei) {
            PetscScalar *coef = NULL;
            ierr = DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);CHKERRQ(ierr);
            ierr = PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it));CHKERRQ(ierr); /* change if LandauIPReal != PetscScalar */
            ierr = DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);CHKERRQ(ierr);
            cellClosure_it += Nb*Nf[grid];
          }
          ierr = VecDestroy(&locX2);CHKERRQ(ierr);
        }
      }
      PetscCheckFalse(cellClosure_it-cellClosure != cellClosure_sz*ctx->batch_sz,PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %D != cellClosure_sz = %D",cellClosure_it-cellClosure,cellClosure_sz*ctx->batch_sz);
      ierr = DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);CHKERRQ(ierr);
      ierr = DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray);CHKERRQ(ierr);
      ierr = PetscFree(locXArray);CHKERRQ(ierr);
      ierr = PetscFree(globXArray);CHKERRQ(ierr);
      xdata = NULL;
    } else {
      PetscMemType mtype;
      if (ctx->jacobian_field_major_order) { // get data in batch ordering
        ierr = VecScatterBegin(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
        ierr = VecScatterEnd(ctx->plex_batch,a_X,ctx->work_vec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
        ierr = VecGetArrayReadAndMemType(ctx->work_vec,&xdata,&mtype);CHKERRQ(ierr);
      } else {
        ierr = VecGetArrayReadAndMemType(a_X,&xdata,&mtype);CHKERRQ(ierr);
      }
      if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
        SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
      }
      cellClosure = NULL;
    }
    ierr = PetscLogEventEnd(ctx->events[1],0,0,0,0);CHKERRQ(ierr);
  } else xdata = cellClosure = NULL;

  /* do it */
  if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
    if (ctx->deviceType == LANDAU_CUDA) {
#if defined(PETSC_HAVE_CUDA)
      ierr = LandauCUDAJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,ctx->subThreadBlockSize,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP);CHKERRQ(ierr);
#else
      SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
#endif
    } else if (ctx->deviceType == LANDAU_KOKKOS) {
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
      ierr = LandauKokkosJacobian(ctx->plex,Nq,ctx->batch_sz,ctx->num_grids,numCells,Eq_m,cellClosure,xdata,&ctx->SData_d,ctx->subThreadBlockSize,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP);CHKERRQ(ierr);
#else
      SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
#endif
    }
  } else { /* CPU version */
    PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
    PetscInt        ip_offset[LANDAU_MAX_GRIDS+1], ipf_offset[LANDAU_MAX_GRIDS+1], elem_offset[LANDAU_MAX_GRIDS+1],IPf_sz_glb,IPf_sz_tot,num_grids=ctx->num_grids,Nf[LANDAU_MAX_GRIDS];
    PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
    PetscReal       Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; // shadows the outer Eq_m, which is only used by the device paths
    PetscSection    section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
      ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
      ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
    }
    /* count IPf size, etc */
    ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); // Bf, &Df same for all grids
    const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
    ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
    for (PetscInt grid=0 ; grid<num_grids ; grid++) {
      PetscInt nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
      elem_offset[grid+1] = elem_offset[grid] + numCells[grid];
      ip_offset[grid+1]   = ip_offset[grid] + numCells[grid]*Nq;
      ipf_offset[grid+1]  = ipf_offset[grid] + Nq*nfloc*numCells[grid];
    }
    IPf_sz_glb = ipf_offset[num_grids];
    IPf_sz_tot = IPf_sz_glb*ctx->batch_sz;
    if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      ierr = PetscLogEventBegin(ctx->events[8],0,0,0,0);CHKERRQ(ierr);
      for (PetscInt fieldA=0;fieldA<ctx->num_species;fieldA++) {
        invMass[fieldA]  = ctx->m_0/ctx->masses[fieldA];
        Eq_m[fieldA]     = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
        if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
        nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->m_0)*ctx->m_0/ctx->masses[fieldA];
        nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
      }
      ierr = PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim==3 ? IPf_sz_tot : 0, &dudz);CHKERRQ(ierr);
      // f and df/dx
      for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
        const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem; // b_id == OMP thd_id in batch
        // find my grid:
        PetscInt grid = 0;
        while (b_elem_idx >= elem_offset[grid+1]) grid++; // linear search for this element's grid
        {
          const PetscInt loc_nip = numCells[grid]*Nq, loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
          const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid]
          PetscScalar    *coef, coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
          PetscReal      *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim]; // invJ is static data on batch 0
          PetscInt       b,f,q;
          if (cellClosure) {
            coef = &cellClosure[b_id*IPf_sz_glb + ipf_offset[grid] + loc_elem*Nb*loc_Nf]; // this is const
          } else {
            coef = coef_buff;
            for (f = 0; f < loc_Nf; ++f) {
              LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
              for (b = 0; b < Nb; ++b) {
                PetscInt idx = Idxs[b];
                if (idx >= 0) {
                  coef[f*Nb+b] = xdata[idx+moffset];
                } else { // constrained point: gather from the face neighbors with their weights
                  idx = -idx - 1;
                  coef[f*Nb+b] = 0;
                  for (q = 0; q < maps[grid].num_face; q++) {
                    PetscInt    id    = maps[grid].c_maps[idx][q].gid;
                    PetscScalar scale = maps[grid].c_maps[idx][q].scale;
                    coef[f*Nb+b] += scale*xdata[id+moffset];
                  }
                }
              }
            }
          }
          /* get f and df */
          for (PetscInt qi = 0; qi < Nq; qi++) {
            const PetscReal *invJ = &invJe[qi*dim*dim];
            const PetscReal *Bq   = &BB[qi*Nb];
            const PetscReal *Dq   = &DD[qi*Nb*dim];
            PetscReal       u_x[LANDAU_DIM];
            /* get f & df */
            for (f = 0; f < loc_Nf; ++f) {
              const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid] + f*loc_nip + loc_elem*Nq + qi;
              PetscInt       b, e;
              PetscReal      refSpaceDer[LANDAU_DIM];
              ff[idx] = 0.0;
              for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
              for (b = 0; b < Nb; ++b) {
                const PetscInt cidx = b;
                ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
                for (int d = 0; d < dim; ++d) {
                  refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
                }
              }
              for (int d = 0; d < LANDAU_DIM; ++d) {
                for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) {
                  u_x[d] += invJ[e*dim+d]*refSpaceDer[e];
                }
              }
              dudx[idx] = u_x[0];
              dudy[idx] = u_x[1];
#if LANDAU_DIM==3
              dudz[idx] = u_x[2];
#endif
            }
          } // q
        } // grid
      } // grid*batch
      ierr = PetscLogEventEnd(ctx->events[8],0,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_HAVE_THREADSAFETY)
      endtime = MPI_Wtime();
      if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
#endif
    } // Jacobian setup
    /* do it */
    for (PetscInt tid = 0 ; tid < ctx->batch_sz*elem_offset[num_grids] ; tid++) { // for each element
      const PetscInt b_Nelem = elem_offset[num_grids];
      const PetscInt b_elem_idx = tid%b_Nelem, b_id = tid/b_Nelem;
      PetscInt       grid = 0;
#if defined(PETSC_HAVE_THREADSAFETY)
      double starttime, endtime;
      starttime = MPI_Wtime();
#endif
      while (b_elem_idx >= elem_offset[grid+1]) grid++;
      {
        const PetscInt loc_Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
        const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset) /* b_id*b_N + ctx->mat_offset[grid] */, totDim = loc_Nf*Nq, elemMatSize = totDim*totDim;
        PetscScalar     *elemMat;
        const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem*Nq)*dim*dim];
        ierr = PetscMalloc1(elemMatSize, &elemMat);CHKERRQ(ierr);
        ierr = PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat));CHKERRQ(ierr);
        if (shift==0.0) { // Jacobian
          ierr = PetscLogEventBegin(ctx->events[4],0,0,0,0);CHKERRQ(ierr);
        } else {
          ierr = PetscLogEventBegin(ctx->events[16],0,0,0,0);CHKERRQ(ierr);
        }
        for (PetscInt qj = 0; qj < Nq; ++qj) {
          const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
          PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
          PetscInt       d,d2,dp,d3,IPf_idx;
          if (shift==0.0) { // Jacobian
            const PetscReal *const invJj = &invJe[qj*dim*dim];
            PetscReal       gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
            const PetscReal vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
            // create g2 & g3
            for (d=0;d<LANDAU_DIM;d++) { // clear accumulation data D & K
              gg2_temp[d] = 0;
              for (d2=0;d2<LANDAU_DIM;d2++) gg3_temp[d][d2] = 0;
            }
            /* inner beta reduction */
            IPf_idx = 0;
            for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
              PetscInt nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
              for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
                for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
                  const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
                  PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM==2
                  PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  LandauTensor2D(vj, x, y, Ud, Uk, mask);
#else
                  PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
                  if (ctx->use_relativistic_corrections) {
                    LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
                  } else {
                    LandauTensor3D(vj, x, y, z, U, mask);
                  }
#endif
                  for (int f = 0; f < Nfloc_r ; ++f) {
                    const PetscInt idx = b_id*IPf_sz_glb + ipf_offset[grid_r] + f*nip_loc_r + ei_r*Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
                    temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
                    temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
#if LANDAU_DIM==3
                    temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
#endif
                    temp2 += ff[idx]*nu_beta[f+f_off];
                  }
                  temp1[0] *= wi;
                  temp1[1] *= wi;
#if LANDAU_DIM==3
                  temp1[2] *= wi;
#endif
                  temp2 *= wi;
#if LANDAU_DIM==2
                  for (d2 = 0; d2 < 2; d2++) {
                    for (d3 = 0; d3 < 2; ++d3) {
                      /* K = U * grad(f): g2=e: i,A */
                      gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
                      /* D = -U * (I \kron (fx)): g3=f: i,j,A */
                      gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
                    }
                  }
#else
                  for (d2 = 0; d2 < 3; ++d2) {
                    for (d3 = 0; d3 < 3; ++d3) {
                      /* K = U * grad(f): g2 = e: i,A */
                      gg2_temp[d2] += U[d2][d3]*temp1[d3];
                      /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
                      gg3_temp[d2][d3] += U[d2][d3]*temp2;
                    }
                  }
#endif
                } // qi
              } // ei_r
              IPf_idx += nip_loc_r*Nfloc_r;
            } /* grid_r - IPs */
            PetscCheckFalse(IPf_idx != IPf_sz_glb,PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %D %D",IPf_idx,IPf_sz_glb);
            // add alpha and put in gg2/3
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
              for (d2 = 0; d2 < dim; d2++) {
                gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
                for (d3 = 0; d3 < dim; d3++) {
                  gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
                }
              }
            }
            /* add electric field term once per IP */
            for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < loc_Nf; ++fieldA) {
              gg2[fieldA][dim-1] += Eq_m[fieldA+f_off];
            }
            /* Jacobian transform - g2, g3 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
              for (d = 0; d < dim; ++d) {
                g2[fieldA][d] = 0.0;
                for (d2 = 0; d2 < dim; ++d2) {
                  g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
                  g3[fieldA][d][d2] = 0.0;
                  for (d3 = 0; d3 < dim; ++d3) {
                    for (dp = 0; dp < dim; ++dp) {
                      g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
                    }
                  }
                  g3[fieldA][d][d2] *= wj;
                }
                g2[fieldA][d] *= wj;
              }
            }
          } else { // mass
            PetscReal wj = ww[jpidx_glb];
            /* Jacobian transform - g0 */
            for (PetscInt fieldA = 0; fieldA < loc_Nf ; ++fieldA) {
              if (dim==2) {
                g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
              } else {
                g0[fieldA] = wj * shift; // move this to below and remove g0
              }
            }
          }
          /* FE matrix construction */
          {
            PetscInt        fieldA,d,f,d2,g;
            const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
            /* assemble - on the diagonal (I,I) */
            for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
              for (f = 0; f < Nb ; f++) {
                const PetscInt i = fieldA*Nb + f; /* Element matrix row */
                for (g = 0; g < Nb; ++g) {
                  const PetscInt j = fieldA*Nb + g; /* Element matrix column */
                  const PetscInt fOff = i*totDim + j;
                  if (shift==0.0) {
                    for (d = 0; d < dim; ++d) {
                      elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
                      for (d2 = 0; d2 < dim; ++d2) {
                        elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
                      }
                    }
                  } else { // mass
                    elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
                  }
                }
              }
            }
          }
        } /* qj loop */
        if (shift==0.0) { // Jacobian
          ierr = PetscLogEventEnd(ctx->events[4],0,0,0,0);CHKERRQ(ierr);
        } else {
          ierr = PetscLogEventEnd(ctx->events[16],0,0,0,0);CHKERRQ(ierr);
        }
#if defined(PETSC_HAVE_THREADSAFETY)
        endtime = MPI_Wtime();
        if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
#endif
        /* assemble matrix */
        if (!container) {
          PetscInt cStart;
          ierr = PetscLogEventBegin(ctx->events[6],0,0,0,0);CHKERRQ(ierr);
          ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL);CHKERRQ(ierr);
          ierr = DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[ LAND_PACK_IDX(b_id,grid) ], loc_elem + cStart, elemMat, ADD_VALUES);CHKERRQ(ierr);
          ierr = PetscLogEventEnd(ctx->events[6],0,0,0,0);CHKERRQ(ierr);
        } else { // GPU-like assembly for debugging
          PetscInt    fieldA,idx,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE]={0},cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
          PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]={0},row_scale[LANDAU_MAX_Q_FACE]={0},col_scale[LANDAU_MAX_Q_FACE]={0};
          /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
          for (fieldA = 0; fieldA < loc_Nf ; fieldA++) {
            LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
            for (f = 0; f < Nb ; f++) {
              idx = Idxs[f];
              if (idx >= 0) {
                nr = 1;
                rows0[0] = idx;
                row_scale[0] = 1.;
              } else {
                idx = -idx - 1;
                for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
                  if (maps[grid].c_maps[idx][q].gid < 0) break;
                  rows0[q] = maps[grid].c_maps[idx][q].gid;
                  row_scale[q] = maps[grid].c_maps[idx][q].scale;
                }
              }
              for (g = 0; g < Nb; ++g) {
                idx = Idxs[g];
                if (idx >= 0) {
                  nc = 1;
                  cols0[0] = idx;
                  col_scale[0] = 1.;
                } else {
                  idx = -idx - 1;
                  for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
                    if (maps[grid].c_maps[idx][q].gid < 0) break;
                    cols0[q] = maps[grid].c_maps[idx][q].gid;
                    col_scale[q] = maps[grid].c_maps[idx][q].scale;
                  }
                }
                const PetscInt    i = fieldA*Nb + f; /* Element matrix row */
                const PetscInt    j = fieldA*Nb + g; /* Element matrix column */
                const PetscScalar Aij = elemMat[i*totDim + j];
                for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
                for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
                for (q = 0; q < nr; q++) {
                  for (d = 0; d < nc; d++) {
                    vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
                  }
                }
                ierr = MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);CHKERRQ(ierr);
              }
            }
          }
        }
        if (loc_elem==-1) { // debugging hook: dump the element matrix and abort
          PetscErrorCode ierr2;
          ierr2 = PetscPrintf(ctx->comm,"CPU Element matrix\n");CHKERRQ(ierr2);
          for (int d = 0; d < totDim; ++d) {
            for (int f = 0; f < totDim; ++f) {ierr2 = PetscPrintf(ctx->comm," %12.5e", PetscRealPart(elemMat[d*totDim + f]));CHKERRQ(ierr2);}
            ierr2 = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr2);
          }
          exit(12);
        }
        ierr = PetscFree(elemMat);CHKERRQ(ierr);
      } /* grid */
    } /* outer element & batch loop */
    if (shift==0.0) { // the Jacobian path allocated the f/df arrays
      ierr = PetscFree4(ff, dudx, dudy, dudz);CHKERRQ(ierr);
    }
    if (!container) { // move nest matrix to global JacP
      for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { // OpenMP
        for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
          const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid]
          PetscInt          nloc, nzl, colbuf[1024], row;
          const PetscInt    *cols;
          const PetscScalar *vals;
          Mat               B = subJ[ LAND_PACK_IDX(b_id,grid) ];
          ierr = MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
          ierr = MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
          ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr);
          for (int i=0 ; i<nloc ; i++) {
            ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
            PetscCheck(nzl<=1024,PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %D",nzl);
            for (int j=0; j<nzl; j++) colbuf[j] = moffset + cols[j];
            row = moffset + i;
            ierr = MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES);CHKERRQ(ierr);
            ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
          }
          ierr = MatDestroy(&B);CHKERRQ(ierr);
        }
      }
    }
  } /* CPU version */
  ierr = MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  /* clean up */
  if (cellClosure) {
    ierr = PetscFree(cellClosure);CHKERRQ(ierr);
  }
  if (xdata) { // restore from the vector the array was taken from
    if (ctx->jacobian_field_major_order) {
      ierr = VecRestoreArrayReadAndMemType(ctx->work_vec,&xdata);CHKERRQ(ierr);
    } else {
      ierr = VecRestoreArrayReadAndMemType(a_X,&xdata);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
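/*
  Reading aid for the CPU kernel above (a summary of the code, not a specification):
  for the Jacobian (shift == 0) each quadrature point j accumulates, over the integration
  points of all grids in the batch, an advection vector g2 (from K = U * grad(f), plus the
  electric field term Eq_m) and a diffusion tensor g3 (from D = -U * (I \kron (f x))),
  both mapped to the reference element with invJ and weighted by wj. For the mass matrix
  (shift != 0) only g0 = shift * wj is used, with an extra 2*pi factor in the 2D
  (cylindrical) case.
*/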
#if defined(LANDAU_ADD_BCS)
static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                    const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                    const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                    PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
{
  uexact[0] = 0;
}
#endif

#define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
                          PetscReal *outX, PetscReal *outY)
{
  PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
  if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
    *outX = x; *outY = y;
  } else {
    const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
    PetscReal       cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
    if (num_sections==2) {
      rotcos  = 0.70710678118654;
      outfact = 1.5; efact = 2.5;
      /* rotate normalized vector into [-pi/4,pi/4) */
      if (sinphi >= 0.) { /* top cell, rotate by -pi/4 */
        cth = 0.707106781186548; sth = -0.707106781186548;
      } else { /* bottom cell, rotate by +pi/4 */
        cth = 0.707106781186548; sth = .707106781186548;
      }
    } else if (num_sections==3) {
      rotcos  = 0.86602540378443;
      outfact = 1.5; efact = 2.5;
      /* rotate normalized vector into [-pi/6,pi/6) */
      if (sinphi >= 0.5) { /* top cell, -pi/3 */
        cth = 0.5; sth = -0.866025403784439;
      } else if (sinphi >= -.5) { /* mid cell, 0 */
        cth = 1.; sth = .0;
      } else { /* bottom cell, +pi/3 */
        cth = 0.5; sth = 0.866025403784439;
      }
    } else if (num_sections==4) {
      rotcos  = 0.9238795325112;
      outfact = 1.5; efact = 3;
      /* rotate normalized vector into [-pi/8,pi/8) */
      if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
        cth = 0.38268343236509; sth = -0.923879532511287;
      } else if (sinphi >= 0.) { /* mid top cell, -pi/8 */
        cth = 0.923879532511287; sth = -.38268343236509;
      } else if (sinphi >= -0.707106781186548) { /* mid bottom cell, +pi/8 */
        cth = 0.923879532511287; sth = 0.38268343236509;
      } else { /* bottom cell, +3pi/8 */
        cth = 0.38268343236509; sth = .923879532511287;
      }
    } else {
      cth = 0.; sth = 0.; rotcos = 0; efact = 0;
    }
    Rth[0][0] = cth; Rth[0][1] =-sth;
    Rth[1][0] = sth; Rth[1][1] = cth;
    MATVEC2(Rth,xy,xyprime);
    if (num_sections==2) {
      newrr = xyprime[0]/rotcos;
    } else {
      PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
      PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
      newrr = rin + routfrac*nroutmax;
    }
    *outX = cosphi*newrr; *outY = sinphi*newrr;
    /* grade */
    PetscReal fact,tt,rs,re, rr2 = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
    if (rr2 > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
    else         { rs = r1; re = r2; fact = efact;}   /* electron zone */
    tt = (rs + PetscPowReal((rr2 - rs)/(re - rs),fact) * (re-rs)) / rr2;
    *outX *= tt;
    *outY *= tt;
  }
}

static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
{
  LandauCtx *ctx = (LandauCtx*)a_ctx;
  PetscReal r = abc[0], z = abc[1];
  PetscFunctionBegin;
  if (ctx->inflate) {
    PetscReal absR, absZ;
    absR = PetscAbs(r);
    absZ = PetscAbs(z);
    CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
    r = (r > 0) ? absR : -absR;
    z = (z > 0) ? absZ : -absZ;
  }
  xyz[0] = r;
  xyz[1] = z;
  if (dim==3) xyz[2] = abc[2];

  PetscFunctionReturn(0);
}

/* create DMComposite of meshes for each species group */
static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  { /* p4est, quads */
    /* Create plex mesh of Landau domain */
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      PetscReal radius = ctx->radius[grid];
      if (!ctx->sphere) {
        PetscInt  cells[] = {2,2,2};
        PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
        DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
        if (dim==2) { lo[0] = 0; cells[0] /* = cells[1] */ = 1; }
        ierr = DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid]);CHKERRQ(ierr); // todo: make composite and create dm[grid] here
        ierr = DMLocalizeCoordinates(ctx->plex[grid]);CHKERRQ(ierr); /* needed for periodic */
        if (dim==3) {ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "cube");CHKERRQ(ierr);}
        else        {ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane");CHKERRQ(ierr);}
      } else if (dim==2) { // sphere is all wrong. should just have one inner radius
        PetscInt  numCells,cells[16][4],i,j;
        PetscInt  numVerts;
        PetscReal inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
        PetscReal *flatCoords = NULL;
        PetscInt  *flatCells = NULL, *pcell;
        if (ctx->num_sections==2) {
#if 1
          numCells = 5;
          numVerts = 10;
          int cells2[][4] = { {0,1,4,3},
                              {1,2,5,4},
                              {3,4,7,6},
                              {4,5,8,7},
                              {6,7,8,9} };
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts-1; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
              PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
            coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
          }
#else
          numCells = 4;
          numVerts = 8;
          static int cells2[][4] = {{0,1,2,3},
                                    {4,5,1,0},
                                    {5,6,2,1},
                                    {6,7,3,2}};
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            PetscInt  j;
            for (j = 0; j < 8; j++) {
              PetscReal z, r;
              PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
              PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
          }
#endif
        } else if (ctx->num_sections==3) {
          numCells = 7;
          numVerts = 12;
          int cells2[][4] = { {0,1,5,4},
                              {1,2,6,5},
                              {2,3,7,6},
                              {4,5,9,8},
                              {5,6,10,9},
                              {6,7,11,10},
                              {8,9,10,11} };
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
              PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
          }
        } else if (ctx->num_sections==4) {
          numCells = 10;
          numVerts = 16;
          int cells2[][4] = { {0,1,6,5},
                              {1,2,7,6},
                              {2,3,8,7},
                              {3,4,9,8},
                              {5,6,11,10},
                              {6,7,12,11},
                              {7,8,13,12},
                              {8,9,14,13},
                              {10,11,12,15},
                              {12,13,14,15}};
          for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
          ierr = PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);CHKERRQ(ierr);
          {
            PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
            for (j = 0; j < numVerts-1; j++) {
              PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
              PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
              z = rad * PetscSinReal(theta);
              coords[j][1] = z;
              r = rad * PetscCosReal(theta);
              coords[j][0] = r;
            }
            coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
          }
        } else {
          numCells = 0;
          numVerts = 0;
        }
        for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
          pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
          pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
        }
        ierr = DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]);CHKERRQ(ierr);
        ierr = PetscFree2(flatCoords,flatCells);CHKERRQ(ierr);
        ierr = PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle");CHKERRQ(ierr);
      } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support cubed sphere");

      ierr = DMSetFromOptions(ctx->plex[grid]);CHKERRQ(ierr);
    } // grid loop
    ierr = PetscObjectSetOptionsPrefix((PetscObject)pack,prefix);CHKERRQ(ierr);
    ierr = DMSetFromOptions(pack);CHKERRQ(ierr);

    { /* convert to p4est (or whatever), wait for discretization to create pack */
      char      convType[256];
      PetscBool flg;
      ierr = PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");CHKERRQ(ierr);
      ierr = PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg);CHKERRQ(ierr);
      ierr = PetscOptionsEnd();CHKERRQ(ierr);
      if (flg) {
        ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
        for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
          DM dmforest;
          ierr = DMConvert(ctx->plex[grid],convType,&dmforest);CHKERRQ(ierr);
          if (dmforest) {
            PetscBool isForest;
            ierr = PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);CHKERRQ(ierr);
            ierr = DMIsForest(dmforest,&isForest);CHKERRQ(ierr);
            if (isForest) {
              if (ctx->sphere && ctx->inflate) {
                ierr = DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);CHKERRQ(ierr);
              }
              ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
              ctx->plex[grid] = dmforest; // Forest for adaptivity
            } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non-Forest?");
          } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
        }
      } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
    }
  } /* non-file */
  ierr = DMSetDimension(pack, dim);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) pack, "Mesh");CHKERRQ(ierr);
  ierr = DMSetApplicationContext(pack, ctx);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
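/* Set up the FEM discretization on one grid: one scalar FE field per species in this
   species group, named "e" for electrons and "i<n>" for ions; section components are
   named "se"/"si<n>" for output. */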
static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
{
  PetscErrorCode ierr;
  PetscInt       ii,i0;
  char           buf[256];
  PetscSection   section;

  PetscFunctionBegin;
  for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
    if (ii==0) {ierr = PetscSNPrintf(buf, 256, "e");CHKERRQ(ierr);}
    else       {ierr = PetscSNPrintf(buf, 256, "i%D", ii);CHKERRQ(ierr);}
    /* Setup Discretization - FEM */
    ierr = PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) ctx->fe[ii], buf);CHKERRQ(ierr);
    ierr = DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]);CHKERRQ(ierr);
  }
  ierr = DMCreateDS(ctx->plex[grid]);CHKERRQ(ierr);
  ierr = DMGetSection(ctx->plex[grid], &section);CHKERRQ(ierr);
  for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
    if (ii==0) {ierr = PetscSNPrintf(buf, 256, "se");CHKERRQ(ierr);}
    else       {ierr = PetscSNPrintf(buf, 256, "si%D", ii);CHKERRQ(ierr);}
    ierr = PetscSectionSetComponentName(section, i0, 0, buf);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Define a Maxwellian function for testing out the operator. */

/* Using Cartesian velocity space coordinates, the particle */
/* density, [1/m^3], is defined according to */

/* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

/* Using some constant, c, we normalize the velocity vector into a */
/* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

/* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

/* Defining $\theta=2T/mc^2$, we thus find that the probability density */
/* for finding the particle in a box dx^3 around x is */

/* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */

typedef struct {
  PetscReal v_0;
  PetscReal kT_m;
  PetscReal n;
  PetscReal shift;
} MaxwellianCtx;

static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
{
  MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
  PetscInt      i;
  PetscReal     v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
  PetscFunctionBegin;
  /* compute the exponents, v^2 */
  for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
  /* evaluate the Maxwellian */
  u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
  if (mctx->shift!=0.) {
    v2 = 0;
    for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
    v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
    /* evaluate the shifted Maxwellian */
    u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
  }
  PetscFunctionReturn(0);
}
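/* Consistency note implied by the derivation above (not checked in code): with v = v_0*x
   and theta = 2*kT/(m*v_0^2), integrating u[0] = n*(pi*theta)^{-3/2}*exp(-x^2/theta) over
   R^3 in x recovers the density n. */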
/*@
  LandauAddMaxwellians - Add a Maxwellian distribution to a state

  Collective on X

  Input Parameters:
+ dm    - The mesh (local)
. time  - Current time
. temps - Temperatures of each species (global)
. ns    - Number density of each species (global)
. grid  - Index of the current grid, used only for the offset into temps and ns
. b_id  - Index within the batch
- actx  - Landau context

  Output Parameter:
. X - The state (local to this grid)

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace()
@*/
PetscErrorCode LandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, void *actx)
{
  LandauCtx      *ctx = (LandauCtx*)actx;
  PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
  PetscErrorCode ierr;
  PetscInt       dim,ii,i0;
  MaxwellianCtx  *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

  PetscFunctionBegin;
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  if (!ctx) { ierr = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr); }
  for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
    mctxs[i0]      = &data[i0];
    data[i0].v_0   = ctx->v_0; // v_0 same for all grids
    data[i0].kT_m  = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
    data[i0].n     = ns[ii] * (1+(double)b_id/100.0); // make solves a little different to mimic application, n[0] used for Connor-Hastie
    initu[i0]      = maxwellian;
    data[i0].shift = 0;
  }
  data[0].shift = ctx->electronShift;
  /* need to make ADD_ALL_VALUES work - TODO */
  ierr = DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  LandauSetInitialCondition - Adds Maxwellians with context

  Collective on X

  Input Parameters:
+ dm   - The mesh
. grid - Index of the current grid, used only for the offset into temps and ns
. b_id - Index within the batch
- actx - Landau context with T and n

  Output Parameter:
. X - The state

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace(), LandauAddMaxwellians()
*/
static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, void *actx)
{
  LandauCtx      *ctx = (LandauCtx*)actx;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  if (!ctx) { ierr = DMGetApplicationContext(dm, &ctx);CHKERRQ(ierr); }
  ierr = VecZeroEntries(X);CHKERRQ(ierr);
  ierr = LandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, ctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

// adapt a level once. Forest in/out
static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
{
  DM              forest, plex, adaptedDM = NULL;
  PetscDS         prob;
  PetscBool       isForest;
  PetscQuadrature quad;
  PetscInt        Nq, *Nb, cStart, cEnd, c, dim, qj, k;
  DMLabel         adaptLabel = NULL;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  forest = ctx->plex[grid];
  ierr = DMCreateDS(forest);CHKERRQ(ierr);
  ierr = DMGetDS(forest, &prob);CHKERRQ(ierr);
  ierr = DMGetDimension(forest, &dim);CHKERRQ(ierr);
  ierr = DMIsForest(forest, &isForest);CHKERRQ(ierr);
  PetscCheckFalse(!isForest,ctx->comm,PETSC_ERR_ARG_WRONG,"Not a forest");
  ierr = DMConvert(forest, DMPLEX, &plex);CHKERRQ(ierr);
  ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
  ierr = DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);CHKERRQ(ierr);
  ierr = PetscFEGetQuadrature(fem, &quad);CHKERRQ(ierr);
  ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);CHKERRQ(ierr);
  PetscCheckFalse(Nq > LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
  ierr = PetscDSGetDimensions(prob, &Nb);CHKERRQ(ierr);
  if (type==4) {
    for (c = cStart; c < cEnd; c++) {
      ierr = DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);CHKERRQ(ierr);
    }
    ierr = PetscInfo(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");CHKERRQ(ierr);
  } else if (type==2) {
    PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
    PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
    for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
    for (c = cStart; c < cEnd; c++) {
      PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
      ierr = DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);CHKERRQ(ierr);
      for (qj = 0; qj < Nq; ++qj) {
        tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
        r  = PetscSqrtReal(tt);
        if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
          minRad = r;
          nr     = 0;
          rCellIdx[nr++]= c;
          ierr = PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %D, qp %D/%D\n", r, c, qj+1, Nq);CHKERRQ(ierr);
        } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
          for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
          if (k==nr) {
            rCellIdx[nr++]= c;
            ierr = PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %D, qp %D/%D, d=%e\n", r, c, qj+1, Nq, r-minRad);CHKERRQ(ierr);
          }
        }
        if (ctx->sphere) {
          if ((tt=r-ctx->e_radius) > 0) {
            ierr = PetscInfo(sol, "\t\t\t %D cell r=%g\n",c,tt);CHKERRQ(ierr);
            if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
              eMinRad = tt;
              eMaxIdx = 0;
              eCellIdx[eMaxIdx++] = c;
            } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
              eCellIdx[eMaxIdx++] = c;
            }
          }
          if ((tt=r-ctx->i_radius[grid]) > 0) {
            if (tt < iMinRad - 1.e-5) {
              iMinRad = tt;
              iMaxIdx = 0;
              iCellIdx[iMaxIdx++] = c;
            } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
              iCellIdx[iMaxIdx++] = c;
            }
          }
        }
      }
    }
    for (k=0;k<nr;k++) {
      ierr = DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);CHKERRQ(ierr);
    }
    if (ctx->sphere) {
      for (c = 0; c < eMaxIdx; c++) {
        ierr = DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);CHKERRQ(ierr);
        ierr = PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %D r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);CHKERRQ(ierr);
      }
      for (c = 0; c < iMaxIdx; c++) {
        ierr = DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);CHKERRQ(ierr);
        ierr = PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %D r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);CHKERRQ(ierr);
      }
    }
    ierr = PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %D,%D r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);CHKERRQ(ierr);
  } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
    PetscScalar  *coef = NULL;
    Vec          coords;
    PetscInt     csize,Nv,d,nz;
    DM           cdm;
    PetscSection cs;
    ierr = DMGetCoordinatesLocal(forest, &coords);CHKERRQ(ierr);
    ierr = DMGetCoordinateDM(forest, &cdm);CHKERRQ(ierr);
    ierr = DMGetLocalSection(cdm, &cs);CHKERRQ(ierr);
    for (c = cStart; c < cEnd; c++) {
      PetscInt doit = 0, outside = 0;
      ierr = DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);CHKERRQ(ierr);
      Nv = csize/dim;
      for (nz = d = 0; d < Nv; d++) {
        PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
        x = PetscSqrtReal(x);
        if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1;                            /* refine origin */
        else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */
        else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++;                            /* don't refine outside electron refine radius */
        else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++;                            /* don't refine outside ion refine radius */
        if (x < PETSC_MACHINE_EPSILON*10.) nz++;
      }
      ierr = DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);CHKERRQ(ierr);
      if (doit || (outside<Nv && nz)) {
        ierr = DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);CHKERRQ(ierr);
      }
    }
    ierr = PetscInfo(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");CHKERRQ(ierr);
  }
  ierr = DMDestroy(&plex);CHKERRQ(ierr);
  ierr = DMAdaptLabel(forest, adaptLabel, &adaptedDM);CHKERRQ(ierr);
  ierr = DMLabelDestroy(&adaptLabel);CHKERRQ(ierr);
  *newForest = adaptedDM;
  if (adaptedDM) {
    if (isForest) {
      ierr = DMForestSetAdaptivityForest(adaptedDM,NULL);CHKERRQ(ierr); // clear the reference to the pre-adaptation forest
    } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Adapted a non-forest?");
    ierr = DMConvert(adaptedDM, DMPLEX, &plex);CHKERRQ(ierr);
    ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
    ierr = PetscInfo(sol, "\tPhase: adaptToleranceFEM: %D cells, %d total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));CHKERRQ(ierr);
    ierr = DMDestroy(&plex);CHKERRQ(ierr);
  } else *newForest = NULL;
  PetscFunctionReturn(0);
}
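/*
  Refinement schedule used by adapt() below (as implemented by the types above):
    type 0 - refine cells touching the r=0 axis, skipping those outside re_radius (runaway-electron pass)
    type 1 - refine cells touching the r=0 axis within vperp0_radius1
    type 2 - refine the cells closest to the origin (and the sphere radii when ctx->sphere is set)
    type 3 - refine cells touching the r=0 axis within vperp0_radius2
    type 4 - uniform refinement
*/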
// refine ctx->plex[grid] in place: a forest goes in, an adapted forest comes out
static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
{
  PetscErrorCode ierr;
  PetscInt       adaptIter;

  PetscFunctionBegin;
  PetscInt type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
  for (type=0;type<5;type++) {
    for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
      DM newForest = NULL;
      ierr = adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest);CHKERRQ(ierr);
      if (newForest) {
        ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
        ierr = VecDestroy(uu);CHKERRQ(ierr);
        ierr = DMCreateGlobalVector(newForest,uu);CHKERRQ(ierr);
        ierr = PetscObjectSetName((PetscObject) *uu, "uAMR");CHKERRQ(ierr);
        ierr = LandauSetInitialCondition(newForest, *uu, grid, 0, ctx);CHKERRQ(ierr);
        ctx->plex[grid] = newForest;
      } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "No new forest created; can happen with no AMR and post refinement");
    }
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
{
  PetscErrorCode ierr;
  PetscBool      flg, sph_flg;
  PetscInt       ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
  PetscReal      v0_grid[LANDAU_MAX_GRIDS];
  DM             dummy;

  PetscFunctionBegin;
  ierr = DMCreate(ctx->comm,&dummy);CHKERRQ(ierr);
  /* get options - initialize context */
  ctx->verbose = 1; // should default to 0 for silence
#if defined(PETSC_HAVE_THREADSAFETY)
  ctx->batch_sz = PetscNumOMPThreads;
#else
  ctx->batch_sz = 1;
#endif
  ctx->batch_view_idx = 0;
  ctx->interpolate    = PETSC_TRUE;
  ctx->gpu_assembly   = PETSC_TRUE;
  ctx->aux_bool       = PETSC_FALSE;
  ctx->electronShift  = 0;
  ctx->M              = NULL;
  ctx->J              = NULL;
  /* geometry and grids */
  ctx->sphere       = PETSC_FALSE;
  ctx->inflate      = PETSC_FALSE;
  ctx->use_p4est    = PETSC_FALSE;
  ctx->num_sections = 3; /* 2, 3 or 4 */
  for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
    ctx->radius[grid]           = 5.; /* thermal radius (velocity) */
    ctx->numAMRRefine[grid]     = 5;
    ctx->postAMRRefine[grid]    = 0;
    ctx->species_offset[grid+1] = 1; // one species default
    num_species_grid[grid]      = 0;
    ctx->plex[grid]             = NULL; /* cache as expensive to Convert */
  }
  ctx->species_offset[0] = 0;
  ctx->re_radius      = 0.;
  ctx->vperp0_radius1 = 0;
  ctx->vperp0_radius2 = 0;
  ctx->nZRefine1      = 0;
  ctx->nZRefine2      = 0;
  ctx->numRERefine    = 0;
  num_species_grid[0] = 1; // one species default
  /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
  ctx->charges[0] = -1;                     /* electron charge (MKS) */
  ctx->masses[0]  = 1/1835.469965278441013; /* temporary value in proton mass */
  ctx->n[0]       = 1;
  ctx->v_0        = 1; /* thermal velocity, we could start with a scale != 1 */
  ctx->thermal_temps[0] = 1;
  ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
  ctx->k = 1.38064852e-23;    /* Boltzmann constant (MKS) J/K */
  ctx->lnLam = 10;            /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
  ctx->n_0 = 1.e20;           /* typical plasma n, but could set it to 1 */
  ctx->Ez = 0;
  ctx->subThreadBlockSize = 1; /* for device and maybe OMP */
  for (PetscInt grid=0;grid<LANDAU_NUM_TIMERS;grid++) ctx->times[grid] = 0;
  ctx->use_matrix_mass = PETSC_FALSE; /* fast but slightly fragile */
  ctx->use_relativistic_corrections = PETSC_FALSE;
  ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
  ctx->SData_d.w = NULL;
  ctx->SData_d.x = NULL;
  ctx->SData_d.y = NULL;
  ctx->SData_d.z = NULL;
  ctx->SData_d.invJ = NULL;
  ctx->jacobian_field_major_order = PETSC_FALSE;
  ierr = PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Planck-Landau collision operator", "none");CHKERRQ(ierr);
  {
    char opstring[256];
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
    ctx->deviceType = LANDAU_KOKKOS;
    ierr = PetscStrcpy(opstring,"kokkos");CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
    ctx->subThreadBlockSize = 16;
#endif
#elif defined(PETSC_HAVE_CUDA)
    ctx->deviceType = LANDAU_CUDA;
    ierr = PetscStrcpy(opstring,"cuda");CHKERRQ(ierr);
#else
    ctx->deviceType = LANDAU_CPU;
    ierr = PetscStrcpy(opstring,"cpu");CHKERRQ(ierr);
    ctx->subThreadBlockSize = 0;
#endif
    ierr = PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,256,NULL);CHKERRQ(ierr);
    ierr = PetscStrcmp("cpu",opstring,&flg);CHKERRQ(ierr);
    if (flg) {
      ctx->deviceType = LANDAU_CPU;
      ctx->subThreadBlockSize = 0;
    } else {
      ierr = PetscStrcmp("cuda",opstring,&flg);CHKERRQ(ierr);
      if (flg) {
        ctx->deviceType = LANDAU_CUDA;
        ctx->subThreadBlockSize = 0;
      } else {
        ierr = PetscStrcmp("kokkos",opstring,&flg);CHKERRQ(ierr);
        if (flg) ctx->deviceType = LANDAU_KOKKOS;
        else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
      }
    }
  }
  ierr = PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL);CHKERRQ(ierr);
  PetscCheckFalse(LANDAU_MAX_BATCH_SZ < ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"LANDAU_MAX_BATCH_SZ %D < ctx->batch_sz %D",LANDAU_MAX_BATCH_SZ,ctx->batch_sz);
  ierr = PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL);CHKERRQ(ierr);
  PetscCheckFalse(ctx->batch_view_idx >= ctx->batch_sz,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_batch_view_idx %D >= batch size %D",ctx->batch_view_idx,ctx->batch_sz);
  ierr = PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);CHKERRQ(ierr);
  ierr =
PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);CHKERRQ(ierr); 1380 ierr = PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL);CHKERRQ(ierr); 1381 ierr = PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL);CHKERRQ(ierr); 1382 ierr = PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL);CHKERRQ(ierr); 1383 1384 /* get num species with temperature, set defaults */ 1385 for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) { 1386 ctx->thermal_temps[ii] = 1; 1387 ctx->charges[ii] = 1; 1388 ctx->masses[ii] = 1; 1389 ctx->n[ii] = 1; 1390 } 1391 nt = LANDAU_MAX_SPECIES; 1392 ierr = PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);CHKERRQ(ierr); 1393 if (flg) { 1394 PetscInfo(dummy, "num_species set to number of thermal temps provided (%D)\n",nt); 1395 ctx->num_species = nt; 1396 } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps ,t1,t2,.. must be provided to set the number of species"); 1397 for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */ 1398 nm = LANDAU_MAX_SPECIES-1; 1399 ierr = PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);CHKERRQ(ierr); 1400 if (flg && nm != ctx->num_species-1) { 1401 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %D != num species %D",nm,ctx->num_species-1); 1402 } 1403 nm = LANDAU_MAX_SPECIES; 1404 ierr = PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg);CHKERRQ(ierr); 1405 PetscCheckFalse(flg && nm != ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %D != num species %D",nm,ctx->num_species); 1406 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */ 1407 ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */ 1408 ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */ 1409 nc = LANDAU_MAX_SPECIES-1; 1410 ierr = PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);CHKERRQ(ierr); 1411 PetscCheckFalse(flg && nc != ctx->num_species-1,ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %D != num species %D",nc,ctx->num_species-1); 1412 for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */ 1413 /* geometry and grids */ 1414 nt = LANDAU_MAX_GRIDS; 1415 ierr = PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] 
for single grid","plexland.c", num_species_grid, &nt, &flg);CHKERRQ(ierr); 1416 if (flg) { 1417 ctx->num_grids = nt; 1418 for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii]; 1419 PetscCheckFalse(ctx->num_species != nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %D != num_species = %D. %D grids (check that number of grids <= LANDAU_MAX_GRIDS = %D)",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS); 1420 } else { 1421 ctx->num_grids = 1; // go back to a single grid run 1422 num_species_grid[0] = ctx->num_species; 1423 } 1424 for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii]; 1425 PetscCheckFalse(ctx->species_offset[ctx->num_grids] != ctx->num_species,ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %D != ctx->num_species = %D ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species); 1426 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1427 int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid 1428 v0_grid[grid] = PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */ 1429 } 1430 ii = 0; 1431 ierr = PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL);CHKERRQ(ierr); 1432 ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */ 1433 ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */ 1434 /* domain */ 1435 nt = LANDAU_MAX_GRIDS; 1436 ierr = PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg);CHKERRQ(ierr); 1437 PetscCheckFalse(flg && nt < ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %D radius != number grids %D",nt,ctx->num_grids); 1438 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1439 if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */ 1440 if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75; 1441 else ctx->radius[grid] = -ctx->radius[grid]; 1442 ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid) 1443 ierr = PetscInfo(dummy, "Change domain radius to %g for grid %D\n",ctx->radius[grid],grid);CHKERRQ(ierr); 1444 } 1445 ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0 1446 } 1447 /* amr parametres */ 1448 nt = LANDAU_MAX_GRIDS; 1449 ierr = PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg);CHKERRQ(ierr); 1450 PetscCheckFalse(flg && nt < ctx->num_grids,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %D != number grids %D",nt,ctx->num_grids); 1451 nt = LANDAU_MAX_GRIDS; 1452 ierr = PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg);CHKERRQ(ierr); 1453 for (ii=1;ii<ctx->num_grids;ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now 1454 ierr = PetscOptionsInt("-dm_landau_amr_re_levels", "Number of 
  ierr = PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-dm_landau_re_radius","Velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-dm_landau_z_radius1","Velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-dm_landau_z_radius2","Velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg);CHKERRQ(ierr);
  /* spherical domain (not used) */
  ierr = PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-dm_landau_sphere", "Use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg);CHKERRQ(ierr);
  if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an e radius was given without setting sphere; assume a spherical domain was intended */
  if (!flg) {
    ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
  }
  nt = LANDAU_MAX_GRIDS;
  ierr = PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg);CHKERRQ(ierr);
  if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
  if (!flg) {
    ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
  }
  PetscCheckFalse(flg && ctx->num_grids != nt,ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %D != number of grids %D",nt,ctx->num_grids);
  PetscCheckFalse(ctx->sphere && ctx->e_radius <= ctx->i_radius[0],ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius[0],ctx->e_radius,ctx->radius[0]);
  /* processing options */
  ierr = PetscOptionsInt("-dm_landau_sub_thread_block_size", "Number of threads in Kokkos integration point subblock", "plexland.c", ctx->subThreadBlockSize, &ctx->subThreadBlockSize, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL);CHKERRQ(ierr);
  if (ctx->jacobian_field_major_order && !ctx->gpu_assembly) {
    SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_jacobian_field_major_order requires -dm_landau_gpu_assembly");
  }
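  /* Illustrative combination of the options above (values are hypothetical examples, not defaults):
     a two-grid electron + deuterium run could be configured roughly as
       -dm_landau_thermal_temps 2,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
       -dm_landau_n 1,1 -dm_landau_num_species_grid 1,1
       -dm_landau_amr_levels_max 2,2 -dm_landau_device_type cpu
     Note that -dm_landau_thermal_temps is the only required option; its length sets the
     number of species. */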
1484 ierr = PetscOptionsEnd();CHKERRQ(ierr); 1485 1486 for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0; 1487 if (ctx->verbose > 0) { 1488 ierr = PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);CHKERRQ(ierr); 1489 ierr = PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);CHKERRQ(ierr); 1490 ierr = PetscPrintf(ctx->comm, "n: e: %10.3e i: %10.3e %10.3e\n", ctx->n[0],ctx->n[1],ctx->num_species>2 ? ctx->n[2] : 0);CHKERRQ(ierr); 1491 ierr = PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %D batched\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",ctx->batch_sz);CHKERRQ(ierr); 1492 ierr = PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %D: %g (%D) ",0,ctx->radius[0],ctx->numAMRRefine[0]);CHKERRQ(ierr); 1493 for (ii=1;ii<ctx->num_grids;ii++) PetscPrintf(ctx->comm, ", %D: %10.3e (%D) ",ii,ctx->radius[ii],ctx->numAMRRefine[ii]); 1494 ierr = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr); 1495 if (ctx->jacobian_field_major_order) { 1496 ierr = PetscPrintf(ctx->comm,"Using field major order for GPU Jacobian\n");CHKERRQ(ierr); 1497 } else { 1498 ierr = PetscPrintf(ctx->comm,"Using default Plex order for all matrices\n");CHKERRQ(ierr); 1499 } 1500 } 1501 ierr = DMDestroy(&dummy);CHKERRQ(ierr); 1502 { 1503 PetscMPIInt rank; 1504 ierr = MPI_Comm_rank(ctx->comm, &rank);CHKERRMPI(ierr); 1505 ctx->stage = 0; 1506 ierr = PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]);CHKERRQ(ierr); /* 13 */ 1507 ierr = PetscLogEventRegister(" GPU ass. 
setup", DM_CLASSID, &ctx->events[2]);CHKERRQ(ierr); /* 2 */ 1508 ierr = PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]);CHKERRQ(ierr); /* 12 */ 1509 ierr = PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]);CHKERRQ(ierr); /* 15 */ 1510 ierr = PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14]);CHKERRQ(ierr); /* 14 */ 1511 ierr = PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11]);CHKERRQ(ierr); /* 11 */ 1512 ierr = PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]);CHKERRQ(ierr); /* 0 */ 1513 ierr = PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]);CHKERRQ(ierr); /* 9 */ 1514 ierr = PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]);CHKERRQ(ierr); /* 10 */ 1515 ierr = PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]);CHKERRQ(ierr); /* 7 */ 1516 ierr = PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]);CHKERRQ(ierr); /* 1 */ 1517 ierr = PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]);CHKERRQ(ierr); /* 3 */ 1518 ierr = PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]);CHKERRQ(ierr); /* 8 */ 1519 ierr = PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]);CHKERRQ(ierr); /* 4 */ 1520 ierr = PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16]);CHKERRQ(ierr); /* 16 */ 1521 ierr = PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]);CHKERRQ(ierr); /* 5 */ 1522 ierr = PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]);CHKERRQ(ierr); /* 6 */ 1523 1524 if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */ 1525 ierr = PetscOptionsClearValue(NULL,"-snes_converged_reason");CHKERRQ(ierr); 1526 ierr = PetscOptionsClearValue(NULL,"-ksp_converged_reason");CHKERRQ(ierr); 1527 ierr = PetscOptionsClearValue(NULL,"-snes_monitor");CHKERRQ(ierr); 1528 ierr = PetscOptionsClearValue(NULL,"-ksp_monitor");CHKERRQ(ierr); 1529 ierr = PetscOptionsClearValue(NULL,"-ts_monitor");CHKERRQ(ierr); 1530 ierr = PetscOptionsClearValue(NULL,"-ts_view");CHKERRQ(ierr); 1531 ierr = PetscOptionsClearValue(NULL,"-ts_adapt_monitor");CHKERRQ(ierr); 1532 ierr = PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");CHKERRQ(ierr); 1533 ierr = PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");CHKERRQ(ierr); 1534 ierr = PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view");CHKERRQ(ierr); 1535 ierr = PetscOptionsClearValue(NULL,"-dm_landau_mass_view");CHKERRQ(ierr); 1536 ierr = PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view");CHKERRQ(ierr); 1537 ierr = PetscOptionsClearValue(NULL,"-dm_landau_mat_view");CHKERRQ(ierr); 1538 ierr = PetscOptionsClearValue(NULL,"-");CHKERRQ(ierr); 1539 ierr = PetscOptionsClearValue(NULL,"-info");CHKERRQ(ierr); 1540 } 1541 } 1542 PetscFunctionReturn(0); 1543 } 1544 1545 static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx) 1546 { 1547 PetscErrorCode ierr; 1548 PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS]; 1549 PetscQuadrature quad; 1550 const PetscReal *quadWeights; 1551 PetscInt q,eidx,fieldA,numCells[LANDAU_MAX_GRIDS],Nq,Nb,Nf[LANDAU_MAX_GRIDS]; 1552 PetscTabulation *Tf; 1553 PetscDS prob; 1554 1555 PetscFunctionBegin; 1556 ierr = DMGetDS(ctx->plex[0], &prob);CHKERRQ(ierr); // same DS for all grids 1557 ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); // Bf, &Df same for all grids 1558 /* DS, Tab and quad is same on 
all grids */
  PetscCheckFalse(ctx->plex[0] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
  ierr = PetscFEGetQuadrature(ctx->fe[0], &quad);CHKERRQ(ierr);
  ierr = PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights);CHKERRQ(ierr);
  Nb = Nq;
  PetscCheckFalse(Nq > LANDAU_MAX_NQ,ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
  /* setup each grid */
  for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
    PetscInt cStart, cEnd;
    PetscCheckFalse(ctx->plex[grid] == NULL,ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
    ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
    numCells[grid] = cEnd - cStart; // grids can have different topology
    ierr = DMGetLocalSection(ctx->plex[grid], &section[grid]);CHKERRQ(ierr);
    ierr = DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);CHKERRQ(ierr);
    ierr = PetscSectionGetNumFields(section[grid], &Nf[grid]);CHKERRQ(ierr);
  }
#define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
  /* create GPU assembly data */
  if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
    PetscContainer container;
    PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
    pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
    P4estVertexMaps *maps;
    const PetscInt *plex_batch=NULL;

    ierr = PetscInfo(ctx->plex[0], "Make GPU maps %D\n",1);CHKERRQ(ierr);
    ierr = PetscLogEventBegin(ctx->events[2],0,0,0,0);CHKERRQ(ierr);
    ierr = PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps);CHKERRQ(ierr);
    ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
    ierr = PetscContainerSetPointer(container, (void *)maps);CHKERRQ(ierr);
    ierr = PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject) ctx->J, "assembly_maps", (PetscObject) container);CHKERRQ(ierr);
    ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      PetscInt cStart, cEnd, ej, Nfloc = Nf[grid], totDim = Nfloc*Nq;
      if (grid_batch_is_inv[grid]) {
        ierr = ISGetIndices(grid_batch_is_inv[grid], &plex_batch);CHKERRQ(ierr);
      }
      ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr);
      // make maps
      maps[grid].d_self = NULL;
      maps[grid].num_elements = numCells[grid];
      maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
      maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
      maps[grid].num_reduced = 0;
      maps[grid].deviceType = ctx->deviceType;
      maps[grid].numgrids = ctx->num_grids;
      // count reduced and get
      ierr = PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx);CHKERRQ(ierr);
      for (fieldA=0;fieldA<Nf[grid];fieldA++) {
        for (ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx) {
          for (q = 0; q < Nb; ++q) {
            PetscInt numindices,*indices;
            PetscScalar *valuesOrig = elMat = elemMatrix;
            ierr = PetscMemzero(elMat, totDim*totDim*sizeof(*elMat));CHKERRQ(ierr);
            elMat[(fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
            ierr = DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)
&elMat);CHKERRQ(ierr); 1616 for (PetscInt f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal 1617 if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) { 1618 // found it 1619 if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0 1620 if (plex_batch) { 1621 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx) plex_batch[indices[f]]; 1622 } else { 1623 maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; 1624 } 1625 } else { //found a constraint 1626 int jj = 0; 1627 PetscReal sum = 0; 1628 const PetscInt ff = f; 1629 maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1 1630 1631 do { // constraints are continuous in Plex - exploit that here 1632 int ii; // get 'scale' 1633 for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value 1634 if (ff + ii < numindices) { // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not 1635 pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]); 1636 } 1637 } 1638 sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic 1639 // get 'gid' 1640 if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps 1641 else { 1642 if (plex_batch) { 1643 pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]]; 1644 } else { 1645 pointMaps[maps[grid].num_reduced][jj].gid = indices[f]; 1646 } 1647 } 1648 } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end 1649 while (jj < maps[grid].num_face) { 1650 pointMaps[maps[grid].num_reduced][jj].scale = 0; 1651 pointMaps[maps[grid].num_reduced][jj].gid = -1; 1652 jj++; 1653 } 1654 if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug 1655 int d,f; 1656 PetscReal tmp = 0; 1657 PetscPrintf(PETSC_COMM_SELF,"\t\t%D.%D.%D) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%D)\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face); 1658 for (d = 0, tmp = 0; d < numindices; ++d) { 1659 if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) {ierr = PetscPrintf(PETSC_COMM_WORLD,"%3D) %3D: ",d,indices[d]);CHKERRQ(ierr);} 1660 for (f = 0; f < numindices; ++f) { 1661 tmp += PetscRealPart(elMat[d*numindices + f]); 1662 } 1663 if (tmp!=0) {ierr = PetscPrintf(ctx->comm," | %22.16e\n",tmp);CHKERRQ(ierr);} 1664 } 1665 } 1666 maps[grid].num_reduced++; 1667 PetscCheckFalse(maps[grid].num_reduced>=MAP_BF_SIZE,PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %d",maps[grid].num_reduced,MAP_BF_SIZE); 1668 } 1669 break; 1670 } 1671 } 1672 // cleanup 1673 ierr = DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);CHKERRQ(ierr); 1674 if (elMat != valuesOrig) {ierr = DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat);CHKERRQ(ierr);} 1675 } 1676 } // cell 1677 } // field 1678 // allocate and copy point data maps[grid].gIdx[eidx][field][q] 1679 ierr = PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps);CHKERRQ(ierr); 1680 for (ej = 0; ej < maps[grid].num_reduced; ++ej) { 1681 for (q = 0; q < maps[grid].num_face; ++q) { 1682 maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale; 1683 
maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid; 1684 } 1685 } 1686 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1687 if (ctx->deviceType == LANDAU_KOKKOS) { 1688 ierr = LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid);CHKERRQ(ierr); // imples Kokkos does 1689 } // else could be CUDA 1690 #endif 1691 #if defined(PETSC_HAVE_CUDA) 1692 if (ctx->deviceType == LANDAU_CUDA) { 1693 ierr = LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid);CHKERRQ(ierr); 1694 } 1695 #endif 1696 if (plex_batch) { 1697 ierr = ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch);CHKERRQ(ierr); 1698 ierr = ISDestroy(&grid_batch_is_inv[grid]);CHKERRQ(ierr); // we are done with this 1699 } 1700 } /* grids */ 1701 ierr = PetscLogEventEnd(ctx->events[2],0,0,0,0);CHKERRQ(ierr); 1702 } // end GPU assembly 1703 { /* create static point data, Jacobian called first, only one vertex copy */ 1704 PetscReal *invJe,*ww,*xx,*yy,*zz=NULL,*invJ_a; 1705 PetscInt outer_ipidx, outer_ej,grid, nip_glb = 0; 1706 PetscFE fe; 1707 1708 ierr = PetscLogEventBegin(ctx->events[7],0,0,0,0);CHKERRQ(ierr); 1709 ierr = PetscInfo(ctx->plex[0], "Initialize static data\n");CHKERRQ(ierr); 1710 for (PetscInt grid=0;grid<ctx->num_grids;grid++) nip_glb += Nq*numCells[grid]; 1711 /* collect f data, first time is for Jacobian, but make mass now */ 1712 if (ctx->verbose > 0) { 1713 PetscInt ncells = 0, N; 1714 ierr = MatGetSize(ctx->J,&N,NULL);CHKERRQ(ierr); 1715 for (PetscInt grid=0;grid<ctx->num_grids;grid++) ncells += numCells[grid]; 1716 ierr = PetscPrintf(ctx->comm,"%D) %s %D IPs, %D cells total, Nb=%D, Nq=%D, dim=%D, Tab: Nb=%D Nf=%D Np=%D cdim=%D N=%D\n", 1717 0,"FormLandau",nip_glb,ncells, Nb, Nq, dim, Nb, ctx->num_species, Nb, dim, N);CHKERRQ(ierr); 1718 } 1719 ierr = PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a);CHKERRQ(ierr); 1720 if (dim==3) { 1721 ierr = PetscMalloc1(nip_glb,&zz);CHKERRQ(ierr); 1722 } 1723 if (ctx->use_energy_tensor_trick) { 1724 ierr = PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe);CHKERRQ(ierr); 1725 ierr = PetscObjectSetName((PetscObject) fe, "energy");CHKERRQ(ierr); 1726 } 1727 /* init each grids static data - no batch */ 1728 for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) { // OpenMP (once) 1729 Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic 1730 PetscSection e_section; 1731 DM dmEnergy; 1732 PetscInt cStart, cEnd, ej; 1733 1734 ierr = DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);CHKERRQ(ierr); 1735 // prep energy trick, get v^2 / 2 vector 1736 if (ctx->use_energy_tensor_trick) { 1737 PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? 
gamma_m1_f : energy_f}; 1738 Vec glob_v2; 1739 PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))}; 1740 1741 ierr = DMClone(ctx->plex[grid], &dmEnergy);CHKERRQ(ierr); 1742 ierr = PetscObjectSetName((PetscObject) dmEnergy, "energy");CHKERRQ(ierr); 1743 ierr = DMSetField(dmEnergy, 0, NULL, (PetscObject)fe);CHKERRQ(ierr); 1744 ierr = DMCreateDS(dmEnergy);CHKERRQ(ierr); 1745 ierr = DMGetSection(dmEnergy, &e_section);CHKERRQ(ierr); 1746 ierr = DMGetGlobalVector(dmEnergy,&glob_v2);CHKERRQ(ierr); 1747 ierr = PetscObjectSetName((PetscObject) glob_v2, "trick");CHKERRQ(ierr); 1748 c2_0[0] = &data[0]; 1749 ierr = DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2);CHKERRQ(ierr); 1750 ierr = DMGetLocalVector(dmEnergy, &v2_2);CHKERRQ(ierr); 1751 ierr = VecZeroEntries(v2_2);CHKERRQ(ierr); /* zero BCs so don't set */ 1752 ierr = DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2);CHKERRQ(ierr); 1753 ierr = DMGlobalToLocalEnd (dmEnergy, glob_v2, INSERT_VALUES, v2_2);CHKERRQ(ierr); 1754 ierr = DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view");CHKERRQ(ierr); 1755 ierr = VecViewFromOptions(glob_v2,NULL, "-energy_vec_view");CHKERRQ(ierr); 1756 ierr = DMRestoreGlobalVector(dmEnergy, &glob_v2);CHKERRQ(ierr); 1757 } 1758 /* append part of the IP data for each grid */ 1759 for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) { 1760 PetscScalar *coefs = NULL; 1761 PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0); 1762 invJe = invJ_a + outer_ej*Nq*dim*dim; 1763 ierr = DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJe, detJj);CHKERRQ(ierr); 1764 if (ctx->use_energy_tensor_trick) { 1765 ierr = DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);CHKERRQ(ierr); 1766 } 1767 /* create static point data */ 1768 for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) { 1769 const PetscInt gidx = outer_ipidx; 1770 const PetscReal *invJ = &invJe[qj*dim*dim]; 1771 ww [gidx] = detJj[qj] * quadWeights[qj]; 1772 if (dim==2) ww [gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */ 1773 // get xx, yy, zz 1774 if (ctx->use_energy_tensor_trick) { 1775 double refSpaceDer[3],eGradPhi[3]; 1776 const PetscReal * const DD = Tf[0]->T[1]; 1777 const PetscReal *Dq = &DD[qj*Nb*dim]; 1778 for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0; 1779 for (int b = 0; b < Nb; ++b) { 1780 for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]); 1781 } 1782 xx[gidx] = 1e10; 1783 if (ctx->use_relativistic_corrections) { 1784 double dg2_c2 = 0; 1785 //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02; 1786 for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]); 1787 dg2_c2 *= (double)c02; 1788 if (dg2_c2 >= .999) { 1789 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1790 yy[gidx] = vj[qj * dim + 1]; 1791 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1792 PetscPrintf(ctx->comm,"Error: %12.5e %D.%D) dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, xx[gidx],yy[gidx],zz[gidx]); 1793 } else { 1794 PetscReal fact = c02/PetscSqrtReal(1. 
- dg2_c2); 1795 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact; 1796 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0 1797 } 1798 } 1799 if (xx[gidx] == 1e10) { 1800 for (int d = 0; d < dim; ++d) { 1801 for (int e = 0 ; e < dim; ++e) { 1802 eGradPhi[d] += invJ[e*dim+d]*refSpaceDer[e]; 1803 } 1804 } 1805 xx[gidx] = eGradPhi[0]; 1806 yy[gidx] = eGradPhi[1]; 1807 if (dim==3) zz[gidx] = eGradPhi[2]; 1808 } 1809 } else { 1810 xx[gidx] = vj[qj * dim + 0]; /* coordinate */ 1811 yy[gidx] = vj[qj * dim + 1]; 1812 if (dim==3) zz[gidx] = vj[qj * dim + 2]; 1813 } 1814 } /* q */ 1815 if (ctx->use_energy_tensor_trick) { 1816 ierr = DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);CHKERRQ(ierr); 1817 } 1818 } /* ej */ 1819 if (ctx->use_energy_tensor_trick) { 1820 ierr = DMRestoreLocalVector(dmEnergy, &v2_2);CHKERRQ(ierr); 1821 ierr = DMDestroy(&dmEnergy);CHKERRQ(ierr); 1822 } 1823 } /* grid */ 1824 if (ctx->use_energy_tensor_trick) { 1825 ierr = PetscFEDestroy(&fe);CHKERRQ(ierr); 1826 } 1827 /* cache static data */ 1828 if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) { 1829 #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS) 1830 PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES]; 1831 for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { 1832 for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) { 1833 invMass[ii] = ctx->m_0/ctx->masses[ii]; 1834 nu_alpha[ii] = PetscSqr(ctx->charges[ii]/ctx->m_0)*ctx->m_0/ctx->masses[ii]; 1835 nu_beta[ii] = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3); 1836 } 1837 } 1838 if (ctx->deviceType == LANDAU_CUDA) { 1839 #if defined(PETSC_HAVE_CUDA) 1840 ierr = LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1841 nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d);CHKERRQ(ierr); 1842 #else 1843 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda"); 1844 #endif 1845 } else if (ctx->deviceType == LANDAU_KOKKOS) { 1846 #if defined(PETSC_HAVE_KOKKOS_KERNELS) 1847 ierr = LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, 1848 nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d);CHKERRQ(ierr); 1849 #else 1850 SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos"); 1851 #endif 1852 } 1853 #endif 1854 /* free */ 1855 ierr = PetscFree4(ww,xx,yy,invJ_a);CHKERRQ(ierr); 1856 if (dim==3) { 1857 ierr = PetscFree(zz);CHKERRQ(ierr); 1858 } 1859 } else { /* CPU version, just copy in, only use part */ 1860 ctx->SData_d.w = (void*)ww; 1861 ctx->SData_d.x = (void*)xx; 1862 ctx->SData_d.y = (void*)yy; 1863 ctx->SData_d.z = (void*)zz; 1864 ctx->SData_d.invJ = (void*)invJ_a; 1865 } 1866 ierr = PetscLogEventEnd(ctx->events[7],0,0,0,0);CHKERRQ(ierr); 1867 } // initialize 1868 PetscFunctionReturn(0); 1869 } 1870 1871 /* < v, u > */ 1872 static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, 1873 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 1874 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 1875 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const 
PetscScalar constants[], PetscScalar g0[])
{
  g0[0] = 1.;
}

/* fake weak form: returns a new value at every evaluation so each assembled entry is a
   distinct nonzero; used only to build a matrix for RCM reordering below */
static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                    const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                    const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                    PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  static double ttt = 1;
  g0[0] = ttt++;
}

/* < v, u > */
static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                 PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
{
  g0[0] = 2.*PETSC_PI*x[0];
}

static PetscErrorCode MatrixNfDestroy(void *ptr)
{
  PetscInt *nf = (PetscInt *)ptr;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = PetscFree(nf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode LandauCreateMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
{
  PetscErrorCode ierr;
  PetscInt *idxs=NULL;
  Mat subM[LANDAU_MAX_GRIDS];

  PetscFunctionBegin;
  if (!ctx->gpu_assembly) { /* nothing to do without GPU assembly */
    PetscFunctionReturn(0);
  }
  // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is'
  if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
    ierr = PetscMalloc1(ctx->mat_offset[ctx->num_grids]*ctx->batch_sz, &idxs);CHKERRQ(ierr);
  }
  for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
    const PetscInt *values, n = ctx->mat_offset[grid+1] - ctx->mat_offset[grid];
    Mat gMat;
    DM massDM;
    PetscDS prob;
    Vec tvec;
    // get "mass" matrix for reordering
    ierr = DMClone(ctx->plex[grid], &massDM);CHKERRQ(ierr);
    ierr = DMCopyFields(ctx->plex[grid], massDM);CHKERRQ(ierr);
    ierr = DMCreateDS(massDM);CHKERRQ(ierr);
    ierr = DMGetDS(massDM, &prob);CHKERRQ(ierr);
    for (int ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
      ierr = PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL);CHKERRQ(ierr);
    }
    ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
    ierr = DMSetFromOptions(massDM);CHKERRQ(ierr);
    ierr = DMCreateMatrix(massDM, &gMat);CHKERRQ(ierr);
    ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
    ierr = DMCreateLocalVector(ctx->plex[grid],&tvec);CHKERRQ(ierr);
    ierr = DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx);CHKERRQ(ierr);
    ierr = DMDestroy(&massDM);CHKERRQ(ierr);
    ierr = VecDestroy(&tvec);CHKERRQ(ierr);
    subM[grid] = gMat;
    if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
      MatOrderingType rtype = MATORDERINGRCM;
      IS isrow,isicol;
      ierr = MatGetOrdering(gMat,rtype,&isrow,&isicol);CHKERRQ(ierr);
      ierr = ISInvertPermutation(isrow,PETSC_DECIDE,&grid_batch_is_inv[grid]);CHKERRQ(ierr);
      ierr = ISGetIndices(isrow, &values);CHKERRQ(ierr);
      for (PetscInt b_id=0 ; b_id
< ctx->batch_sz ; b_id++) { // add batch size DMs for this species grid 1952 #if !defined(LANDAU_SPECIES_MAJOR) 1953 PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id*N; 1954 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 1955 #else 1956 PetscInt n0 = ctx->mat_offset[grid]*ctx->batch_sz + b_id*n; 1957 for (int ii = 0; ii < n; ++ii) idxs[n0+ii] = values[ii] + n0; 1958 #endif 1959 } 1960 ierr = ISRestoreIndices(isrow, &values);CHKERRQ(ierr); 1961 ierr = ISDestroy(&isrow);CHKERRQ(ierr); 1962 ierr = ISDestroy(&isicol);CHKERRQ(ierr); 1963 } 1964 } 1965 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1966 ierr = ISCreateGeneral(comm,ctx->mat_offset[ctx->num_grids]*ctx->batch_sz,idxs,PETSC_OWN_POINTER,&ctx->batch_is);CHKERRQ(ierr); 1967 } 1968 // get a block matrix 1969 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 1970 Mat B = subM[grid]; 1971 PetscInt nloc, nzl, colbuf[1024], row; 1972 ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr); 1973 for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) { 1974 const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset); 1975 const PetscInt *cols; 1976 const PetscScalar *vals; 1977 for (int i=0 ; i<nloc ; i++) { 1978 ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr); 1979 PetscCheck(nzl<=1024,comm, PETSC_ERR_PLIB, "Row too big: %D",nzl); 1980 for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset; 1981 row = i + moffset; 1982 ierr = MatSetValues(ctx->J,1,&row,nzl,colbuf,vals,INSERT_VALUES);CHKERRQ(ierr); 1983 ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr); 1984 } 1985 } 1986 } 1987 for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { 1988 ierr = MatDestroy(&subM[grid]);CHKERRQ(ierr); 1989 } 1990 ierr = MatAssemblyBegin(ctx->J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1991 ierr = MatAssemblyEnd(ctx->J,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1992 1993 if (ctx->gpu_assembly && ctx->jacobian_field_major_order) { 1994 Mat mat_block_order; 1995 PetscContainer container; 1996 ierr = MatCreateSubMatrix(ctx->J,ctx->batch_is,ctx->batch_is,MAT_INITIAL_MATRIX,&mat_block_order);CHKERRQ(ierr); // use MatPermute 1997 ierr = MatDestroy(&ctx->J);CHKERRQ(ierr); 1998 ctx->J = mat_block_order; 1999 // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order 2000 ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr); 2001 ierr = PetscContainerSetPointer(container, (void *)ctx);CHKERRQ(ierr); 2002 ierr = PetscObjectCompose((PetscObject) ctx->J, "LandauCtx", (PetscObject) container);CHKERRQ(ierr); 2003 ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); 2004 // override ops to make KSP work in field major space 2005 ctx->seqaij_mult = mat_block_order->ops->mult; 2006 mat_block_order->ops->mult = LandauMatMult; 2007 mat_block_order->ops->multadd = LandauMatMultAdd; 2008 ctx->seqaij_solve = NULL; 2009 ctx->seqaij_getdiagonal = mat_block_order->ops->getdiagonal; 2010 mat_block_order->ops->getdiagonal = LandauMatGetDiagonal; 2011 ctx->seqaij_multtranspose = mat_block_order->ops->multtranspose; 2012 mat_block_order->ops->multtranspose = LandauMatMultTranspose; 2013 ierr = VecDuplicate(X,&ctx->work_vec);CHKERRQ(ierr); 2014 ierr = VecScatterCreate(X, ctx->batch_is, ctx->work_vec, NULL, &ctx->plex_batch);CHKERRQ(ierr); 2015 // batch solvers need to map -- can batch solvers work 2016 ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr); 2017 ierr = PetscContainerSetPointer(container, 
(void *)ctx->plex_batch);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject) ctx->J, "plex_batch_is", (PetscObject) container);CHKERRQ(ierr);
    ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  }
  {
    PetscContainer container;
    PetscInt *pNf;
    ierr = PetscContainerCreate(PETSC_COMM_SELF, &container);CHKERRQ(ierr);
    ierr = PetscMalloc(sizeof(*pNf), &pNf);CHKERRQ(ierr);
    *pNf = ctx->batch_sz;
    ierr = PetscContainerSetPointer(container, (void *)pNf);CHKERRQ(ierr);
    ierr = PetscContainerSetUserDestroy(container, MatrixNfDestroy);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject) container);CHKERRQ(ierr);
    ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}

PetscErrorCode LandauCreateMassMatrix(DM pack, Mat *Amat);
/*@C
  LandauCreateVelocitySpace - Create a DMPlex velocity space mesh

  Collective on comm

  Input Parameters:
+ comm - The MPI communicator
. dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
- prefix - prefix for options (not tested)

  Output Parameters:
+ pack - The DM object representing the mesh
. X - A vector (user destroys)
- J - Optional matrix (object destroys)

  Level: beginner

.keywords: mesh
.seealso: DMPlexCreate(), LandauDestroyVelocitySpace()
@*/
PetscErrorCode LandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
{
  PetscErrorCode ierr;
  LandauCtx *ctx;
  Vec Xsub[LANDAU_MAX_GRIDS];
  IS grid_batch_is_inv[LANDAU_MAX_GRIDS];

  PetscFunctionBegin;
  PetscCheckFalse(dim!=2 && dim!=3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
  PetscCheckFalse(LANDAU_DIM != dim,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
  ierr = PetscNew(&ctx);CHKERRQ(ierr);
  ctx->comm = comm; /* used for diagnostics and global errors */
  /* process options */
  ierr = ProcessOptions(ctx,prefix);CHKERRQ(ierr);
  if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
  /* Create Mesh */
  ierr = DMCompositeCreate(PETSC_COMM_SELF,pack);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(ctx->events[13],0,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(ctx->events[15],0,0,0,0);CHKERRQ(ierr);
  ierr = LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack);CHKERRQ(ierr); // creates grids (Forest of AMR)
  for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
    /* create FEM */
    ierr = SetupDS(ctx->plex[grid],dim,grid,ctx);CHKERRQ(ierr);
    /* set initial state */
    ierr = DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) Xsub[grid], "u_orig");CHKERRQ(ierr);
    /* initial static refinement, no solve */
    ierr = LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, ctx);CHKERRQ(ierr);
    /* forest refinement - forest goes in (if forest), plex comes out */
    if (ctx->use_p4est) {
      DM plex;
      ierr = adapt(grid,ctx,&Xsub[grid]);CHKERRQ(ierr); // forest goes in, plex comes out
      ierr = DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view");CHKERRQ(ierr); // need to differentiate - todo
      ierr = VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view");CHKERRQ(ierr);
      // convert to plex, all done with this level
      ierr = DMConvert(ctx->plex[grid], DMPLEX, &plex);CHKERRQ(ierr);
      ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr);
      ctx->plex[grid] = plex;
    }
#if !defined(LANDAU_SPECIES_MAJOR)
    ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
#else
    for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
      ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
    }
#endif
    ierr = DMSetApplicationContext(ctx->plex[grid], ctx);CHKERRQ(ierr);
  }
#if !defined(LANDAU_SPECIES_MAJOR)
  // stack the batched DMs, could do it all here!!! b_id=0
  for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      ierr = DMCompositeAddDM(*pack,ctx->plex[grid]);CHKERRQ(ierr);
    }
  }
#endif
  // create ctx->mat_offset
  ctx->mat_offset[0] = 0;
  for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
    PetscInt n;
    ierr = VecGetLocalSize(Xsub[grid],&n);CHKERRQ(ierr);
    ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + n;
  }
  // create DM & Jac
  ierr = DMSetApplicationContext(*pack, ctx);CHKERRQ(ierr);
  ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
  ierr = DMSetFromOptions(*pack);CHKERRQ(ierr);
  ierr = DMCreateMatrix(*pack, &ctx->J);CHKERRQ(ierr);
  ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
  ierr = MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)ctx->J, "Jac");CHKERRQ(ierr);
  // construct initial conditions in X
  ierr = DMCreateGlobalVector(*pack,X);CHKERRQ(ierr);
  for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
    PetscInt n;
    ierr = VecGetLocalSize(Xsub[grid],&n);CHKERRQ(ierr);
    for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
      PetscScalar const *values;
      const PetscInt moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
      ierr = LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx);CHKERRQ(ierr);
      ierr = VecGetArrayRead(Xsub[grid],&values);CHKERRQ(ierr);
      for (int i=0, idx = moffset; i<n; i++, idx++) {
        ierr = VecSetValue(*X,idx,values[i],INSERT_VALUES);CHKERRQ(ierr);
      }
      ierr = VecRestoreArrayRead(Xsub[grid],&values);CHKERRQ(ierr);
    }
  }
  // cleanup
  for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
    ierr = VecDestroy(&Xsub[grid]);CHKERRQ(ierr);
  }
  /* check for correct matrix type */
  if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
    PetscBool flg;
    if (ctx->deviceType == LANDAU_CUDA) {
      ierr = PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");CHKERRQ(ierr);
      PetscCheckFalse(!flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda or use '-dm_landau_device_type cpu'");
    } else if (ctx->deviceType == LANDAU_KOKKOS) {
      ierr = PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");CHKERRQ(ierr);
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
      PetscCheckFalse(!flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
#else
      PetscCheckFalse(!flg,ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels' for GPU assembly and Kokkos or use '-dm_landau_device_type cpu'");
#endif
    }
  }
  ierr = PetscLogEventEnd(ctx->events[15],0,0,0,0);CHKERRQ(ierr);
  // create field major ordering

  ctx->work_vec = NULL;
  ctx->plex_batch = NULL;
  ctx->batch_is = NULL;
  for (int i=0;i<LANDAU_MAX_GRIDS;i++) grid_batch_is_inv[i] = NULL;
  ierr = PetscLogEventBegin(ctx->events[12],0,0,0,0);CHKERRQ(ierr);
  ierr = LandauCreateMatrix(comm, *X, grid_batch_is_inv, ctx);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(ctx->events[12],0,0,0,0);CHKERRQ(ierr);

  // create AMR GPU assembly maps and static GPU data
  ierr = CreateStaticGPUData(dim,grid_batch_is_inv,ctx);CHKERRQ(ierr);

  ierr = PetscLogEventEnd(ctx->events[13],0,0,0,0);CHKERRQ(ierr);

  // create mass matrix
  ierr = LandauCreateMassMatrix(*pack, NULL);CHKERRQ(ierr);

  if (J) *J = ctx->J;

  PetscFunctionReturn(0);
}

/*@
  LandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh

  Collective on dm

  Input/Output Parameters:
. dm - the dm to destroy

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace()
@*/
PetscErrorCode LandauDestroyVelocitySpace(DM *dm)
{
  PetscErrorCode ierr;
  PetscInt       ii;
  LandauCtx      *ctx;
  PetscFunctionBegin;
  ierr = DMGetApplicationContext(*dm, &ctx);CHKERRQ(ierr);
  ierr = MatDestroy(&ctx->M);CHKERRQ(ierr);
  ierr = MatDestroy(&ctx->J);CHKERRQ(ierr);
  for (ii=0;ii<ctx->num_species;ii++) {
    ierr = PetscFEDestroy(&ctx->fe[ii]);CHKERRQ(ierr);
  }
  ierr = ISDestroy(&ctx->batch_is);CHKERRQ(ierr);
  ierr = VecDestroy(&ctx->work_vec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&ctx->plex_batch);CHKERRQ(ierr);
  if (ctx->deviceType == LANDAU_CUDA) {
#if defined(PETSC_HAVE_CUDA)
    ierr = LandauCUDAStaticDataClear(&ctx->SData_d);CHKERRQ(ierr);
#else
    SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
#endif
  } else if (ctx->deviceType == LANDAU_KOKKOS) {
#if defined(PETSC_HAVE_KOKKOS_KERNELS)
    ierr = LandauKokkosStaticDataClear(&ctx->SData_d);CHKERRQ(ierr);
#else
    SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
#endif
  } else {
    if (ctx->SData_d.x) { /* in a CPU run */
      PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
      ierr = PetscFree4(ww,xx,yy,invJ);CHKERRQ(ierr);
      if (zz) {
        ierr = PetscFree(zz);CHKERRQ(ierr);
      }
    }
  }

  if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
    ierr = PetscPrintf(ctx->comm, "TSStep N 1.0 %10.3e\n",ctx->times[LANDAU_EX2_TSSOLVE]);CHKERRQ(ierr);
    ierr = PetscPrintf(ctx->comm, "2: Solve: %10.3e with %D threads\n",ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL],ctx->batch_sz);CHKERRQ(ierr);
    ierr = PetscPrintf(ctx->comm, "3: Landau: %10.3e\n",ctx->times[LANDAU_MATRIX_TOTAL]);CHKERRQ(ierr);
    ierr = PetscPrintf(ctx->comm, "Landau Jacobian %D 1.0 %10.3e\n",(PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT],ctx->times[LANDAU_JACOBIAN]);CHKERRQ(ierr);
    ierr = PetscPrintf(ctx->comm, "Landau Operator N 1.0 %10.3e\n",ctx->times[LANDAU_OPERATOR]);CHKERRQ(ierr);
    ierr = PetscPrintf(ctx->comm, "Landau Mass N 1.0 %10.3e\n",ctx->times[LANDAU_MASS]);CHKERRQ(ierr);
PetscPrintf(ctx->comm, " Jac-f-df (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_F_DF]);CHKERRQ(ierr); 2244 ierr = PetscPrintf(ctx->comm, " Kernel (GPU) N 1.0 %10.3e\n",ctx->times[LANDAU_KERNEL]);CHKERRQ(ierr); 2245 ierr = PetscPrintf(ctx->comm, "MatLUFactorNum X 1.0 %10.3e\n",ctx->times[KSP_FACTOR]);CHKERRQ(ierr); 2246 ierr = PetscPrintf(ctx->comm, "MatSolve X 1.0 %10.3e\n",ctx->times[KSP_SOLVE]);CHKERRQ(ierr); 2247 } 2248 for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) { 2249 ierr = DMDestroy(&ctx->plex[grid]);CHKERRQ(ierr); 2250 } 2251 PetscFree(ctx); 2252 ierr = DMDestroy(dm);CHKERRQ(ierr); 2253 PetscFunctionReturn(0); 2254 } 2255 2256 /* < v, ru > */ 2257 static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2258 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2259 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2260 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2261 { 2262 PetscInt ii = (PetscInt)PetscRealPart(constants[0]); 2263 f0[0] = u[ii]; 2264 } 2265 2266 /* < v, ru > */ 2267 static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2268 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2269 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2270 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2271 { 2272 PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]); 2273 f0[0] = x[jj]*u[ii]; /* x momentum */ 2274 } 2275 2276 static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, 2277 const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], 2278 const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], 2279 PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0) 2280 { 2281 PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]); 2282 double tmp1 = 0.; 2283 for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2284 f0[0] = tmp1*u[ii]; 2285 } 2286 2287 static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx) 2288 { 2289 const PetscReal *c2_0_arr = ((PetscReal*)actx); 2290 const PetscReal c02 = c2_0_arr[0]; 2291 2292 PetscFunctionBegin; 2293 for (int s = 0 ; s < Nf ; s++) { 2294 PetscReal tmp1 = 0.; 2295 for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i]; 2296 #if defined(PETSC_USE_DEBUG) 2297 u[s] = PetscSqrtReal(1. + tmp1/c02);// u[0] = PetscSqrtReal(1. + xx); 2298 #else 2299 { 2300 PetscReal xx = tmp1/c02; 2301 u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.) 
static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
{
  const PetscReal *c2_0_arr = ((PetscReal*)actx);
  const PetscReal c02       = c2_0_arr[0];

  PetscFunctionBegin;
  for (int s = 0 ; s < Nf ; s++) {
    PetscReal tmp1 = 0.;
    for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
#if defined(PETSC_USE_DEBUG)
    u[s] = PetscSqrtReal(1. + tmp1/c02); /* gamma = sqrt(1 + |v|^2/c^2) */
#else
    {
      PetscReal xx = tmp1/c02;
      u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); /* = sqrt(1 + xx) - 1 = gamma - 1, better conditioned for small xx; note it differs from the debug branch by 1 */
    }
#endif
  }
  PetscFunctionReturn(0);
}

/* < v, 2 pi r u > (density, cylindrical coordinates) */
static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
{
  PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
  f0[0] = 2.*PETSC_PI*x[0]*u[ii];
}

/* < v, 2 pi r z u > (z-momentum, cylindrical coordinates) */
static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                      const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                      const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                      PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
{
  PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
  f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
}

/* < v, 2 pi r (r^2 + z^2) u > (energy, cylindrical coordinates) */
static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
                     const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
                     const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
                     PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
{
  PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
  f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
}
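/*
  A minimal sketch of the conditioning trick used in gamma_n_f() above: for xx = |v|^2/c^2,
  evaluating sqrt(1 + xx) - 1 directly loses precision when xx is small, while the
  algebraically identical form xx/(sqrt(1 + xx) + 1) does not. The helper name is
  hypothetical and the block is not built.
*/
#if 0
static inline PetscReal GammaMinus1(PetscReal v2, PetscReal c2)
{
  const PetscReal xx = v2/c2;
  return xx/(PetscSqrtReal(1. + xx) + 1.); /* == PetscSqrtReal(1. + xx) - 1. exactly, but cancellation-free */
}
#endif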
/*@
  LandauPrintNorms - collects moments and prints them

  Collective on X

  Input Parameters:
+ X - the state
- stepi - current step to print

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace()
@*/
PetscErrorCode LandauPrintNorms(Vec X, PetscInt stepi)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx;
  PetscDS        prob;
  DM             pack;
  PetscInt       cStart, cEnd, dim, ii, i0, nDMs;
  PetscScalar    xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
  PetscScalar    xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
  Vec            *globXArray;

  PetscFunctionBegin;
  ierr = VecGetDM(X, &pack);CHKERRQ(ierr);
  PetscCheckFalse(!pack,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
  ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
  PetscCheckFalse(dim!=2 && dim!=3,PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim= %D",dim);
  ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
  PetscCheckFalse(!ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
  /* print momentum and energy */
  ierr = DMCompositeGetNumberDM(pack,&nDMs);CHKERRQ(ierr);
  PetscCheckFalse(nDMs != ctx->num_grids*ctx->batch_sz,PETSC_COMM_WORLD, PETSC_ERR_PLIB, "#DM wrong %D %D",nDMs,ctx->num_grids*ctx->batch_sz);
  ierr = PetscMalloc(sizeof(*globXArray)*nDMs, &globXArray);CHKERRQ(ierr);
  ierr = DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);CHKERRQ(ierr);
  for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
    Vec Xloc = globXArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
    ierr = DMGetDS(ctx->plex[grid], &prob);CHKERRQ(ierr);
    for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
      PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
      ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
      if (dim==2) { /* 2D axisymmetric (r,z) cylindrical coordinates */
        ierr = PetscDSSetObjective(prob, 0, &f0_s_rden);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
        ierr = PetscDSSetObjective(prob, 0, &f0_s_rmom);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
        ierr = PetscDSSetObjective(prob, 0, &f0_s_rv2);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
        zmomentumtot += zmomentum[ii];
        energytot    += energy[ii];
        densitytot   += density[ii];
        ierr = PetscPrintf(ctx->comm, "%3D) species-%D: charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));CHKERRQ(ierr);
      } else { /* 3D Cartesian coordinates */
        ierr = PetscDSSetObjective(prob, 0, &f0_s_den);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
        ierr = PetscDSSetObjective(prob, 0, &f0_s_mom);CHKERRQ(ierr);
        user[1] = 0;
        ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
        user[1] = 1;
        ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
        user[1] = 2;
        ierr = PetscDSSetConstants(prob, 2, user);CHKERRQ(ierr);
        ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
        zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
        if (ctx->use_relativistic_corrections) {
          /* gamma * M * f */
          if (ii==0 && grid==0) { // do all species and grids at once
            Vec            Mf, globGamma, *globMfArray, *globGammaArray;
            PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
            PetscReal      *c2_0[1], data[1];

            ierr = VecDuplicate(X,&globGamma);CHKERRQ(ierr);
            ierr = VecDuplicate(X,&Mf);CHKERRQ(ierr);
            ierr = PetscMalloc(sizeof(*globMfArray)*nDMs, &globMfArray);CHKERRQ(ierr);
            ierr = PetscMalloc(sizeof(*globGammaArray)*nDMs, &globGammaArray);CHKERRQ(ierr);
            /* M * f */
            ierr = MatMult(ctx->M,X,Mf);CHKERRQ(ierr);
            /* gamma */
            ierr = DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
            for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // note: shadows the outer grid loop, kept so output prints nicely; needs fixing for batching
              Vec v1 = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ];
              data[0] = PetscSqr(C_0(ctx->v_0));
              c2_0[0] = &data[0];
              ierr = DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1);CHKERRQ(ierr);
            }
            ierr = DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
            /* gamma * Mf */
            ierr = DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
            ierr = DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray);CHKERRQ(ierr);
            for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // again a grid loop inside the grid loop, to print nicely
              PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
              Vec      Mfsub = globMfArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], Gsub = globGammaArray[ LAND_PACK_IDX(ctx->batch_view_idx,grid) ], v1, v2;
              // create scratch vectors sized for one component
              ierr = VecGetSize(Mfsub,&N);CHKERRQ(ierr);
              ierr = VecCreate(ctx->comm,&v1);CHKERRQ(ierr);
              ierr = VecSetSizes(v1,PETSC_DECIDE,N/Nf);CHKERRQ(ierr);
              ierr = VecCreate(ctx->comm,&v2);CHKERRQ(ierr);
              ierr = VecSetSizes(v2,PETSC_DECIDE,N/Nf);CHKERRQ(ierr);
              ierr = VecSetFromOptions(v1);CHKERRQ(ierr); /* default Vec type */
              ierr = VecSetFromOptions(v2);CHKERRQ(ierr);
              // gather each component and accumulate the relativistic energy
              ierr = VecGetBlockSize(Gsub,&bs);CHKERRQ(ierr);
              PetscCheckFalse(bs != Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %D != num_species %D in Gsub",bs,Nf);
              ierr = VecGetBlockSize(Mfsub,&bs);CHKERRQ(ierr);
              PetscCheckFalse(bs != Nf,PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %D != num_species %D",bs,Nf);
              for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
                PetscScalar val;
                ierr = VecStrideGather(Gsub,i,v1,INSERT_VALUES);CHKERRQ(ierr);
                ierr = VecStrideGather(Mfsub,i,v2,INSERT_VALUES);CHKERRQ(ierr);
                ierr = VecDot(v1,v2,&val);CHKERRQ(ierr);
                energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
              }
              ierr = VecDestroy(&v1);CHKERRQ(ierr);
              ierr = VecDestroy(&v2);CHKERRQ(ierr);
            } /* grids */
            ierr = DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);CHKERRQ(ierr);
            ierr = DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray);CHKERRQ(ierr);
            ierr = PetscFree(globGammaArray);CHKERRQ(ierr);
            ierr = PetscFree(globMfArray);CHKERRQ(ierr);
            ierr = VecDestroy(&globGamma);CHKERRQ(ierr);
            ierr = VecDestroy(&Mf);CHKERRQ(ierr);
          }
        } else {
          ierr = PetscDSSetObjective(prob, 0, &f0_s_v2);CHKERRQ(ierr);
          ierr = DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);CHKERRQ(ierr);
          energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
        }
        ierr = PetscPrintf(ctx->comm, "%3D) species %D: density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",
                           stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));CHKERRQ(ierr);
        xmomentumtot += xmomentum[ii];
        ymomentumtot += ymomentum[ii];
        zmomentumtot += zmomentum[ii];
        energytot    += energy[ii];
        densitytot   += density[ii];
      }
      if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n");
    }
  }
  ierr = DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);CHKERRQ(ierr);
  ierr = PetscFree(globXArray);CHKERRQ(ierr);
  /* totals */
  ierr = DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd);CHKERRQ(ierr);
  if (ctx->num_species>1) {
    if (dim==2) {
      ierr = PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells on electron grid)",
                         stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);CHKERRQ(ierr);
    } else {
      ierr = PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
                         stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);CHKERRQ(ierr);
    }
  } else {
    ierr = PetscPrintf(ctx->comm, " -- %D cells",cEnd-cStart);CHKERRQ(ierr);
  }
  if (ctx->verbose > 1) {ierr = PetscPrintf(ctx->comm,", %D sub (vector) threads\n",ctx->subThreadBlockSize);CHKERRQ(ierr);}
  else                  {ierr = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}
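/*
  Typical use of LandauPrintNorms(): call it from a TS monitor to track conservation of
  density, momentum, and energy over a run. A sketch; the monitor name is hypothetical.
*/
#if 0
static PetscErrorCode Monitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
{
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = LandauPrintNorms(X, stepi);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* registered with: ierr = TSMonitorSet(ts, Monitor, NULL, NULL);CHKERRQ(ierr); */
#endif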
/*@
  LandauCreateMassMatrix - Create mass matrix for Landau in Plex space (not the field major ordering of the Jacobian)

  Collective on pack

  Input Parameter:
. pack - the DM object

  Output Parameter:
. Amat - The mass matrix (optional); the mass matrix is also stored in the DM context

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace()
@*/
PetscErrorCode LandauCreateMassMatrix(DM pack, Mat *Amat)
{
  DM             mass_pack,massDM[LANDAU_MAX_GRIDS];
  PetscDS        prob;
  PetscInt       ii,dim,N1,N2;
  PetscErrorCode ierr;
  LandauCtx      *ctx;
  Mat            packM,subM[LANDAU_MAX_GRIDS];

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pack,DM_CLASSID,1);
  if (Amat) PetscValidPointer(Amat,2);
  ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
  PetscCheck(ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
  ierr = PetscLogEventBegin(ctx->events[14],0,0,0,0);CHKERRQ(ierr);
  ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
  ierr = DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack);CHKERRQ(ierr);
  /* create pack mass matrix */
  for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
    ierr = DMClone(ctx->plex[grid], &massDM[grid]);CHKERRQ(ierr);
    ierr = DMCopyFields(ctx->plex[grid], massDM[grid]);CHKERRQ(ierr);
    ierr = DMCreateDS(massDM[grid]);CHKERRQ(ierr);
    ierr = DMGetDS(massDM[grid], &prob);CHKERRQ(ierr);
    for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
      if (dim==3) {ierr = PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL);CHKERRQ(ierr);}
      else        {ierr = PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL);CHKERRQ(ierr);}
    }
#if !defined(LANDAU_SPECIES_MAJOR)
    ierr = DMCompositeAddDM(mass_pack,massDM[grid]);CHKERRQ(ierr);
#else
    for (PetscInt b_id=0;b_id<ctx->batch_sz;b_id++) { // add batch size DMs for this species grid
      ierr = DMCompositeAddDM(mass_pack,massDM[grid]);CHKERRQ(ierr);
    }
#endif
    ierr = DMCreateMatrix(massDM[grid], &subM[grid]);CHKERRQ(ierr);
  }
#if !defined(LANDAU_SPECIES_MAJOR)
  // stack the batched DMs
  for (PetscInt b_id=1;b_id<ctx->batch_sz;b_id++) {
    for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
      ierr = DMCompositeAddDM(mass_pack, massDM[grid]);CHKERRQ(ierr);
    }
  }
#endif
  ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only");CHKERRQ(ierr);
  ierr = DMSetFromOptions(mass_pack);CHKERRQ(ierr);
  ierr = DMCreateMatrix(mass_pack, &packM);CHKERRQ(ierr);
  ierr = PetscOptionsInsertString(NULL,"-dm_preallocate_only false");CHKERRQ(ierr);
  ierr = MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);CHKERRQ(ierr);
  ierr = DMDestroy(&mass_pack);CHKERRQ(ierr);
  /* make mass matrix for each block */
  for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
    Vec locX;
    DM  plex = massDM[grid];
    ierr = DMGetLocalVector(plex, &locX);CHKERRQ(ierr);
    /* Mass matrix is independent of the input, so no need to fill locX */
    ierr = DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx);CHKERRQ(ierr);
    ierr = DMRestoreLocalVector(plex, &locX);CHKERRQ(ierr);
    ierr = DMDestroy(&massDM[grid]);CHKERRQ(ierr);
  }
  ierr = MatGetSize(ctx->J, &N1, NULL);CHKERRQ(ierr);
  ierr = MatGetSize(packM, &N2, NULL);CHKERRQ(ierr);
  PetscCheckFalse(N1 != N2,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %D, |Mass| = %D",N1,N2);
  /* assemble block diagonals */
  for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
    Mat      B = subM[grid];
    PetscInt nloc, nzl, colbuf[1024], row;
    ierr = MatGetSize(B, &nloc, NULL);CHKERRQ(ierr);
    for (PetscInt b_id = 0 ; b_id < ctx->batch_sz ; b_id++) {
      const PetscInt    moffset = LAND_MOFFSET(b_id,grid,ctx->batch_sz,ctx->num_grids,ctx->mat_offset);
      const PetscInt    *cols;
      const PetscScalar *vals;
      for (int i=0 ; i<nloc ; i++) {
        ierr = MatGetRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
        PetscCheckFalse(nzl>1024,PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %D",nzl);
        for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
        row = i + moffset;
        ierr = MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES);CHKERRQ(ierr);
        ierr = MatRestoreRow(B,i,&nzl,&cols,&vals);CHKERRQ(ierr);
      }
    }
  }
  // cleanup
  for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
    ierr = MatDestroy(&subM[grid]);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject)packM, "mass");CHKERRQ(ierr);
  ierr = MatViewFromOptions(packM,NULL,"-dm_landau_mass_view");CHKERRQ(ierr);
  ctx->M = packM;
  if (Amat) *Amat = packM;
  ierr = PetscLogEventEnd(ctx->events[14],0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
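/*
  Layout note, as a sketch: the packed mass matrix assembled above is block diagonal.
  Batch b_id of grid assembles at the row/column offset given by LAND_MOFFSET(), so a
  local row of subM[grid] maps to a packed global row as below. The helper name and the
  local_row argument are illustrative.
*/
#if 0
static PetscInt ExampleGlobalRow(LandauCtx *ctx, PetscInt b_id, PetscInt grid, PetscInt local_row)
{
  const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
  return local_row + moffset; /* local_row indexes subM[grid] */
}
#endif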
/*@
  LandauIFunction - TS residual calculation

  Collective on ts

  Input Parameters:
+ ts - The time stepping context
. time_dummy - current time (not used)
. X - Current state
. X_t - Time derivative of current state
- actx - Landau context

  Output Parameter:
. F - The residual

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace(), LandauIJacobian()
@*/
PetscErrorCode LandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx=(LandauCtx*)actx;
  PetscInt       dim;
  DM             pack;
#if defined(PETSC_HAVE_THREADSAFETY)
  double         starttime, endtime;
#endif

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&pack);CHKERRQ(ierr);
  ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
  PetscCheckFalse(!ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
  if (ctx->stage) {
    ierr = PetscLogStagePush(ctx->stage);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(ctx->events[0],0,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_HAVE_THREADSAFETY)
  starttime = MPI_Wtime();
#endif
  ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
  if (!ctx->aux_bool) {
    ierr = PetscInfo(ts, "Create Landau Jacobian t=%g X_t=%p\n",(double)time_dummy,X_t);CHKERRQ(ierr);
    ierr = LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);CHKERRQ(ierr);
    ierr = MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view");CHKERRQ(ierr);
    ctx->aux_bool = PETSC_TRUE;
  } else {
    ierr = PetscInfo(ts, "Skip forming Jacobian, has not changed (should check norm)\n");CHKERRQ(ierr);
  }
  /* mat vec for op */
  ierr = MatMult(ctx->J,X,F);CHKERRQ(ierr); /* C*f */
  /* add time term */
  if (X_t) {
    ierr = MatMultAdd(ctx->M,X_t,F,F);CHKERRQ(ierr);
  }
#if defined(PETSC_HAVE_THREADSAFETY)
  if (ctx->stage) {
    endtime = MPI_Wtime();
    ctx->times[LANDAU_OPERATOR]       += (endtime - starttime);
    ctx->times[LANDAU_JACOBIAN]       += (endtime - starttime);
    ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
  }
#endif
  ierr = PetscLogEventEnd(ctx->events[0],0,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
  if (ctx->stage) {
    ierr = PetscLogStagePop();CHKERRQ(ierr);
#if defined(PETSC_HAVE_THREADSAFETY)
    ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
#endif
  }
  PetscFunctionReturn(0);
}
/*@
  LandauIJacobian - TS Jacobian construction

  Collective on ts

  Input Parameters:
+ ts - The time stepping context
. time_dummy - current time (not used)
. X - Current state
. U_tdummy - Time derivative of current state (not used)
. shift - shift for du/dt term
- actx - Landau context

  Output Parameters:
+ Amat - Jacobian
- Pmat - same as Amat

  Level: beginner

.keywords: mesh
.seealso: LandauCreateVelocitySpace(), LandauIFunction()
@*/
PetscErrorCode LandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
{
  PetscErrorCode ierr;
  LandauCtx      *ctx=NULL;
  PetscInt       dim;
  DM             pack;
#if defined(PETSC_HAVE_THREADSAFETY)
  double         starttime, endtime;
#endif

  PetscFunctionBegin;
  ierr = TSGetDM(ts,&pack);CHKERRQ(ierr);
  ierr = DMGetApplicationContext(pack, &ctx);CHKERRQ(ierr);
  PetscCheckFalse(!ctx,PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
  PetscCheckFalse(Amat!=Pmat || Amat!=ctx->J,ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
  ierr = DMGetDimension(pack, &dim);CHKERRQ(ierr);
  /* get collision Jacobian into A */
  if (ctx->stage) {
    ierr = PetscLogStagePush(ctx->stage);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(ctx->events[9],0,0,0,0);CHKERRQ(ierr);
#if defined(PETSC_HAVE_THREADSAFETY)
  starttime = MPI_Wtime();
#endif
  ierr = PetscInfo(ts, "Adding just mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift);CHKERRQ(ierr);
  PetscCheckFalse(shift==0.0,ctx->comm, PETSC_ERR_PLIB, "zero shift");
  PetscCheckFalse(!ctx->aux_bool,ctx->comm, PETSC_ERR_PLIB, "wrong state");
  if (!ctx->use_matrix_mass) {
    ierr = LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);CHKERRQ(ierr);
    ierr = MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view");CHKERRQ(ierr);
  } else { /* add mass */
    ierr = MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ctx->aux_bool = PETSC_FALSE;
#if defined(PETSC_HAVE_THREADSAFETY)
  if (ctx->stage) {
    endtime = MPI_Wtime();
    ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
    ctx->times[LANDAU_MASS]     += (endtime - starttime);
  }
#endif
  ierr = PetscLogEventEnd(ctx->events[9],0,0,0,0);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(ctx->events[11],0,0,0,0);CHKERRQ(ierr);
  if (ctx->stage) {
    ierr = PetscLogStagePop();CHKERRQ(ierr);
#if defined(PETSC_HAVE_THREADSAFETY)
    ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
#endif
  }
  PetscFunctionReturn(0);
}
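/*
  End-to-end sketch of driving the operator with TS (names illustrative, not built).
  Both callbacks fetch the Landau context from the DM attached to the TS, so the user
  context argument can be NULL here.
*/
#if 0
static PetscErrorCode ExampleAdvance(DM pack, Mat J, Vec X)
{
  TS             ts;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = TSCreate(PetscObjectComm((PetscObject)pack), &ts);CHKERRQ(ierr);
  ierr = TSSetDM(ts, pack);CHKERRQ(ierr);
  ierr = TSSetIFunction(ts, NULL, LandauIFunction, NULL);CHKERRQ(ierr);
  ierr = TSSetIJacobian(ts, J, J, LandauIJacobian, NULL);CHKERRQ(ierr);
  ierr = TSSetFromOptions(ts);CHKERRQ(ierr);
  ierr = TSSolve(ts, X);CHKERRQ(ierr);
  ierr = TSDestroy(&ts);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif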