#define PETSC_SKIP_CXX_COMPLEX_FIX // Kokkos::complex does not need the PetscComplex fix

#include <petsc/private/pcbjkokkosimpl.h>

#include <petsc/private/kspimpl.h>
#include <petscksp.h> /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <../src/mat/impls/aij/seq/kokkos/aijkok.hpp>
#include <petscsection.h>
#include <petscdmcomposite.h>

#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/seq/kokkos/aijkok.hpp>

#include <petscdevice_cupm.h>

static PetscErrorCode PCBJKOKKOSCreateKSP_BJKOKKOS(PC pc)
{
  const char    *prefix;
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;
  DM             dm;

  PetscFunctionBegin;
  PetscCall(KSPCreate(PetscObjectComm((PetscObject)pc), &jac->ksp));
  PetscCall(KSPSetNestLevel(jac->ksp, pc->kspnestlevel));
  PetscCall(KSPSetErrorIfNotConverged(jac->ksp, pc->erroriffailure));
  PetscCall(PetscObjectIncrementTabLevel((PetscObject)jac->ksp, (PetscObject)pc, 1));
  PetscCall(PCGetOptionsPrefix(pc, &prefix));
  PetscCall(KSPSetOptionsPrefix(jac->ksp, prefix));
  PetscCall(KSPAppendOptionsPrefix(jac->ksp, "pc_bjkokkos_"));
  PetscCall(PCGetDM(pc, &dm));
  if (dm) {
    PetscCall(KSPSetDM(jac->ksp, dm));
    PetscCall(KSPSetDMActive(jac->ksp, KSP_DMACTIVE_ALL, PETSC_FALSE));
  }
  jac->reason       = PETSC_FALSE;
  jac->monitor      = PETSC_FALSE;
  jac->batch_target = 0;
  jac->rank_target  = 0;
  jac->nsolves_team = 1;
  jac->ksp->max_it  = 50; // this is really for GMRES w/o restarts
  PetscFunctionReturn(PETSC_SUCCESS);
}

// y <-- Ax
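// x_loc/y_loc are block-local vectors (indexed from 'start'); r and ic are the row/column
// permutations set up in PCSetUp_BJKOKKOS(), so the kernel reads the global CSR arrays in the
// original ordering while producing the product in the reordered block numbering.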
KOKKOS_INLINE_FUNCTION PetscErrorCode MatMult(const team_member team, const PetscInt *glb_Aai, const PetscInt *glb_Aaj, const PetscScalar *glb_Aaa, const PetscInt *r, const PetscInt *ic, const PetscInt start, const PetscInt end, const PetscScalar *x_loc, PetscScalar *y_loc)
{
  Kokkos::parallel_for(Kokkos::TeamThreadRange(team, start, end), [=](const int rowb) {
    int                rowa = ic[rowb];
    int                n    = glb_Aai[rowa + 1] - glb_Aai[rowa];
    const PetscInt    *aj   = glb_Aaj + glb_Aai[rowa]; // global
    const PetscScalar *aa   = glb_Aaa + glb_Aai[rowa];
    PetscScalar        sum;
    Kokkos::parallel_reduce(Kokkos::ThreadVectorRange(team, n), [=](const int i, PetscScalar &lsum) { lsum += aa[i] * x_loc[r[aj[i]] - start]; }, sum);
    Kokkos::single(Kokkos::PerThread(team), [=]() { y_loc[rowb - start] = sum; });
  });
  team.team_barrier();
  return PETSC_SUCCESS;
}

// temp buffer per thread with reduction at end?
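// y <-- A^T x: y_loc is zeroed, then each (permuted) row scatters aa[i]*x into y_loc with
// atomic adds; the note above suggests per-thread buffers with a final reduction as a
// possible alternative to the atomics.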
KOKKOS_INLINE_FUNCTION PetscErrorCode MatMultTranspose(const team_member team, const PetscInt *glb_Aai, const PetscInt *glb_Aaj, const PetscScalar *glb_Aaa, const PetscInt *r, const PetscInt *ic, const PetscInt start, const PetscInt end, const PetscScalar *x_loc, PetscScalar *y_loc)
{
  Kokkos::parallel_for(Kokkos::TeamVectorRange(team, end - start), [=](int i) { y_loc[i] = 0; });
  team.team_barrier();
  Kokkos::parallel_for(Kokkos::TeamThreadRange(team, start, end), [=](const int rowb) {
    int                rowa = ic[rowb];
    int                n    = glb_Aai[rowa + 1] - glb_Aai[rowa];
    const PetscInt    *aj   = glb_Aaj + glb_Aai[rowa]; // global
    const PetscScalar *aa   = glb_Aaa + glb_Aai[rowa];
    const PetscScalar  xx   = x_loc[rowb - start]; // rowb = ic[rowa] = ic[r[rowb]]
    Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, n), [=](const int &i) {
      PetscScalar val = aa[i] * xx;
      Kokkos::atomic_fetch_add(&y_loc[r[aj[i]] - start], val);
    });
  });
  team.team_barrier();
  return PETSC_SUCCESS;
}

typedef struct Batch_MetaData_TAG {
  PetscInt           flops;
  PetscInt           its;
  KSPConvergedReason reason;
} Batch_MetaData;

// Solve A(BB^-1)x = y with TFQMR. Right preconditioned to get un-preconditioned residual
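// Here B is the Jacobi preconditioner (multiplication by the inverse diagonal glb_idiag): the
// iterate is x_hat = B^{-1} x, every operator application below is A*(Diag*v), so the monitored
// residual is the true residual b - A x; the "KSPUnwindPreconditioner" step at the end recovers
// x = B x_hat with one final multiply by Diag.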
static KOKKOS_INLINE_FUNCTION PetscErrorCode BJSolve_TFQMR(const team_member team, const PetscInt *glb_Aai, const PetscInt *glb_Aaj, const PetscScalar *glb_Aaa, const PetscInt *r, const PetscInt *ic, PetscScalar *work_space_global, const int stride_global, const int nShareVec, PetscScalar *work_space_shared, const int stride_shared, PetscReal rtol, PetscReal atol, PetscReal dtol, PetscInt maxit, Batch_MetaData *metad, const PetscInt start, const PetscInt end, const PetscScalar glb_idiag[], const PetscScalar *glb_b, PetscScalar *glb_x, bool monitor)
{
  using Kokkos::parallel_for;
  using Kokkos::parallel_reduce;
  int                Nblk = end - start, it, m, stride = stride_shared, idx = 0;
  PetscReal          dp, dpold, w, dpest, tau, psi, cm, r0;
  const PetscScalar *Diag = &glb_idiag[start];
  PetscScalar       *ptr  = work_space_shared, rho, rhoold, a, s, b, eta, etaold, psiold, cf, dpi;

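  // Work vectors: the first nShareVec vectors are taken from team shared memory (stride_shared
  // apart); after that, ptr/stride switch to the global buffer (stride_global apart). Each
  // "if (idx++ == nShareVec)" below performs that switch at most once.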
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *XX = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *R = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *RP = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *V = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *T = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Q = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *P = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *U = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *D = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *AUQ = V;

  // init: get b, zero x
  parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
    int rowa         = ic[rowb];
    R[rowb - start]  = glb_b[rowa];
    XX[rowb - start] = 0;
  });
  team.team_barrier();
  parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &lsum) { lsum += R[idx] * PetscConj(R[idx]); }, dpi);
  team.team_barrier();
  r0 = dp = PetscSqrtReal(PetscRealPart(dpi));
  // diagnostics
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
  if (monitor) Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("%3d KSP Residual norm %14.12e\n", 0, (double)dp); });
#endif
  if (dp < atol) {
    metad->reason = KSP_CONVERGED_ATOL_NORMAL_EQUATIONS;
    it            = 0;
    goto done;
  }
  if (0 == maxit) {
    metad->reason = KSP_CONVERGED_ITS;
    it            = 0;
    goto done;
  }

  /* Make the initial Rp = R */
  parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { RP[idx] = R[idx]; });
  team.team_barrier();
  /* Set the initial conditions */
  etaold = 0.0;
  psiold = 0.0;
  tau    = dp;
  dpold  = dp;

  /* rhoold = (r,rp)     */
  parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &dot) { dot += R[idx] * PetscConj(RP[idx]); }, rhoold);
  team.team_barrier();
  parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
    U[idx] = R[idx];
    P[idx] = R[idx];
    T[idx] = Diag[idx] * P[idx];
    D[idx] = 0;
  });
  team.team_barrier();
  static_cast<void>(MatMult(team, glb_Aai, glb_Aaj, glb_Aaa, r, ic, start, end, T, V));

  it = 0;
  do {
    /* s <- (v,rp)          */
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &dot) { dot += V[idx] * PetscConj(RP[idx]); }, s);
    team.team_barrier();
    if (s == 0) {
      metad->reason = KSP_CONVERGED_HAPPY_BREAKDOWN;
      goto done;
    }
    a = rhoold / s; /* a <- rho / s         */
    /* q <- u - a v    VecWAXPY(w,alpha,x,y): w = alpha x + y.     */
    /* t <- u + q           */
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
      Q[idx] = U[idx] - a * V[idx];
      T[idx] = U[idx] + Q[idx];
    });
    team.team_barrier();
    // KSP_PCApplyBAorAB
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { T[idx] = Diag[idx] * T[idx]; });
    team.team_barrier();
    static_cast<void>(MatMult(team, glb_Aai, glb_Aaj, glb_Aaa, r, ic, start, end, T, AUQ));
    /* r <- r - a K (u + q) */
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { R[idx] = R[idx] - a * AUQ[idx]; });
    team.team_barrier();
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &lsum) { lsum += R[idx] * PetscConj(R[idx]); }, dpi);
    team.team_barrier();
    dp = PetscSqrtReal(PetscRealPart(dpi));
    for (m = 0; m < 2; m++) {
      if (!m) w = PetscSqrtReal(dp * dpold);
      else w = dp;
      psi = w / tau;
      cm  = 1.0 / PetscSqrtReal(1.0 + psi * psi);
      tau = tau * psi * cm;
      eta = cm * cm * a;
      cf  = psiold * psiold * etaold / a;
      if (!m) {
        /* D = U + cf D */
        parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { D[idx] = U[idx] + cf * D[idx]; });
      } else {
        /* D = Q + cf D */
        parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { D[idx] = Q[idx] + cf * D[idx]; });
      }
      team.team_barrier();
      parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { XX[idx] = XX[idx] + eta * D[idx]; });
      team.team_barrier();
      dpest = PetscSqrtReal(2 * it + m + 2.0) * tau;
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
      if (monitor && m == 1) Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("%3d KSP Residual norm %14.12e\n", it + 1, (double)dpest); });
#endif
      if (dpest < atol) {
        metad->reason = KSP_CONVERGED_ATOL_NORMAL_EQUATIONS;
        goto done;
      }
      if (dpest / r0 < rtol) {
        metad->reason = KSP_CONVERGED_RTOL_NORMAL_EQUATIONS;
        goto done;
      }
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
      if (dpest / r0 > dtol) {
        metad->reason = KSP_DIVERGED_DTOL;
        Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("ERROR block %d diverged: %d it, res=%e, r_0=%e\n", team.league_rank(), it, dpest, r0); });
        goto done;
      }
#else
      if (dpest / r0 > dtol) {
        metad->reason = KSP_DIVERGED_DTOL;
        goto done;
      }
#endif
      if (it + 1 == maxit) {
        metad->reason = KSP_CONVERGED_ITS;
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
        Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("ERROR block %d diverged: TFQMR %d:%d it, res=%e, r_0=%e r_res=%e\n", team.league_rank(), it, m, dpest, r0, dpest / r0); });
#endif
        goto done;
      }
      etaold = eta;
      psiold = psi;
    }

    /* rho <- (r,rp)       */
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &dot) { dot += R[idx] * PetscConj(RP[idx]); }, rho);
    team.team_barrier();
    if (rho == 0) {
      metad->reason = KSP_CONVERGED_HAPPY_BREAKDOWN;
      goto done;
    }
    b = rho / rhoold; /* b <- rho / rhoold   */
    /* u <- r + b q        */
    /* p <- u + b(q + b p) */
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
      U[idx] = R[idx] + b * Q[idx];
      Q[idx] = Q[idx] + b * P[idx];
      P[idx] = U[idx] + b * Q[idx];
    });
    /* v <- K p  */
    team.team_barrier();
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { T[idx] = Diag[idx] * P[idx]; });
    team.team_barrier();
    static_cast<void>(MatMult(team, glb_Aai, glb_Aaj, glb_Aaa, r, ic, start, end, T, V));

    rhoold = rho;
    dpold  = dp;

    it++;
  } while (it < maxit);
done:
  // KSPUnwindPreconditioner
  parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) { XX[idx] = Diag[idx] * XX[idx]; });
  team.team_barrier();
  // put x into Plex order
  parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
    int rowa    = ic[rowb];
    glb_x[rowa] = XX[rowb - start];
  });
  metad->its = it;
  if (1) {
    int nnz;
    parallel_reduce(Kokkos::TeamVectorRange(team, start, end), [=](const int idx, int &lsum) { lsum += (glb_Aai[idx + 1] - glb_Aai[idx]); }, nnz);
    metad->flops = 2 * (metad->its * (10 * Nblk + 2 * nnz) + 5 * Nblk);
  } else {
    metad->flops = 2 * (metad->its * (10 * Nblk + 2 * 50 * Nblk) + 5 * Nblk); // guess
  }
  return PETSC_SUCCESS;
}

// Solve Ax = y with biCG
static KOKKOS_INLINE_FUNCTION PetscErrorCode BJSolve_BICG(const team_member team, const PetscInt *glb_Aai, const PetscInt *glb_Aaj, const PetscScalar *glb_Aaa, const PetscInt *r, const PetscInt *ic, PetscScalar *work_space_global, const int stride_global, const int nShareVec, PetscScalar *work_space_shared, const int stride_shared, PetscReal rtol, PetscReal atol, PetscReal dtol, PetscInt maxit, Batch_MetaData *metad, const PetscInt start, const PetscInt end, const PetscScalar glb_idiag[], const PetscScalar *glb_b, PetscScalar *glb_x, bool monitor)
{
  using Kokkos::parallel_for;
  using Kokkos::parallel_reduce;
  int                Nblk = end - start, it, stride = stride_shared, idx = 0; // start in shared mem
  PetscReal          dp, r0;
  const PetscScalar *Di  = &glb_idiag[start];
  PetscScalar       *ptr = work_space_shared, dpi, a = 1.0, beta, betaold = 1.0, t1, t2;

  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *XX = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Rl = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Zl = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Pl = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Rr = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Zr = ptr;
  ptr += stride;
  if (idx++ == nShareVec) {
    ptr    = work_space_global;
    stride = stride_global;
  }
  PetscScalar *Pr = ptr;
  ptr += stride;

  /*     r <- b (x is 0) */
  parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
    int rowa         = ic[rowb];
    Rl[rowb - start] = Rr[rowb - start] = glb_b[rowa];
    XX[rowb - start]                    = 0;
  });
  team.team_barrier();
  /*     z <- Br         */
  parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
    Zr[idx] = Di[idx] * Rr[idx];
    Zl[idx] = Di[idx] * Rl[idx];
  });
  team.team_barrier();
  /*    dp <- r'*r       */
  parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &lsum) { lsum += Rr[idx] * PetscConj(Rr[idx]); }, dpi);
  team.team_barrier();
  r0 = dp = PetscSqrtReal(PetscRealPart(dpi));
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
  if (monitor) Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("%3d KSP Residual norm %14.12e\n", 0, (double)dp); });
#endif
  if (dp < atol) {
    metad->reason = KSP_CONVERGED_ATOL_NORMAL_EQUATIONS;
    it            = 0;
    goto done;
  }
  if (0 == maxit) {
    metad->reason = KSP_CONVERGED_ITS;
    it            = 0;
    goto done;
  }

  it = 0;
  do {
    /*     beta <- r'z     */
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &dot) { dot += Zr[idx] * PetscConj(Rl[idx]); }, beta);
    team.team_barrier();
#if PCBJKOKKOS_VERBOSE_LEVEL >= 6
  #if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
    Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("%7d beta = Z.R = %22.14e \n", it, (double)PetscRealPart(beta)); });
  #endif
#endif
    if (beta == 0.0) {
      metad->reason = KSP_CONVERGED_HAPPY_BREAKDOWN;
      goto done;
    }
    if (it == 0) {
      /*     p <- z          */
      parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
        Pr[idx] = Zr[idx];
        Pl[idx] = Zl[idx];
      });
    } else {
      t1 = beta / betaold;
      /*     p <- z + b* p   */
      t2 = PetscConj(t1);
      parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
        Pr[idx] = t1 * Pr[idx] + Zr[idx];
        Pl[idx] = t2 * Pl[idx] + Zl[idx];
      });
    }
    team.team_barrier();
    betaold = beta;
    /*     z <- Kp         */
    static_cast<void>(MatMult(team, glb_Aai, glb_Aaj, glb_Aaa, r, ic, start, end, Pr, Zr));
    static_cast<void>(MatMultTranspose(team, glb_Aai, glb_Aaj, glb_Aaa, r, ic, start, end, Pl, Zl));
    /*     dpi <- z'p      */
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &lsum) { lsum += Zr[idx] * PetscConj(Pl[idx]); }, dpi);
    team.team_barrier();
    if (dpi == 0) {
      metad->reason = KSP_CONVERGED_HAPPY_BREAKDOWN;
      goto done;
    }
    //
    a  = beta / dpi; /*     a = beta/p'z    */
    t1 = -a;
    t2 = PetscConj(t1);
    /*     x <- x + ap     */
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
      XX[idx] = XX[idx] + a * Pr[idx];
      Rr[idx] = Rr[idx] + t1 * Zr[idx];
      Rl[idx] = Rl[idx] + t2 * Zl[idx];
    });
    team.team_barrier();
    team.team_barrier();
    /*    dp <- r'*r       */
    parallel_reduce(Kokkos::TeamVectorRange(team, Nblk), [=](const int idx, PetscScalar &lsum) { lsum += Rr[idx] * PetscConj(Rr[idx]); }, dpi);
    team.team_barrier();
    dp = PetscSqrtReal(PetscRealPart(dpi));
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
    if (monitor) Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("%3d KSP Residual norm %14.12e\n", it + 1, (double)dp); });
#endif
    if (dp < atol) {
      metad->reason = KSP_CONVERGED_ATOL_NORMAL_EQUATIONS;
      goto done;
    }
    if (dp / r0 < rtol) {
      metad->reason = KSP_CONVERGED_RTOL_NORMAL_EQUATIONS;
      goto done;
    }
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
    if (dp / r0 > dtol) {
      metad->reason = KSP_DIVERGED_DTOL;
      Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("ERROR block %d diverged: %d it, res=%e, r_0=%e (BICG does this)\n", team.league_rank(), it, dp, r0); });
      goto done;
    }
#else
    if (dp / r0 > dtol) {
      metad->reason = KSP_DIVERGED_DTOL;
      goto done;
    }
#endif
    if (it + 1 == maxit) {
      metad->reason = KSP_CONVERGED_ITS; // don't worry about hitting max iterations
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
      Kokkos::single(Kokkos::PerTeam(team), [=]() { printf("ERROR block %d diverged: BICG %d it, res=%e, r_0=%e r_res=%e\n", team.league_rank(), it, dp, r0, dp / r0); });
#endif
      goto done;
    }
    /* z <- Br  */
    parallel_for(Kokkos::TeamVectorRange(team, Nblk), [=](int idx) {
      Zr[idx] = Di[idx] * Rr[idx];
      Zl[idx] = Di[idx] * Rl[idx];
    });

    it++;
  } while (it < maxit);
done:
  // put x back into Plex order
  parallel_for(Kokkos::TeamVectorRange(team, start, end), [=](int rowb) {
    int rowa    = ic[rowb];
    glb_x[rowa] = XX[rowb - start];
  });
  metad->its = it;
  if (1) {
    int nnz;
    parallel_reduce(Kokkos::TeamVectorRange(team, start, end), [=](const int idx, int &lsum) { lsum += (glb_Aai[idx + 1] - glb_Aai[idx]); }, nnz);
    metad->flops = 2 * (metad->its * (10 * Nblk + 2 * nnz) + 5 * Nblk);
  } else {
    metad->flops = 2 * (metad->its * (10 * Nblk + 2 * 50 * Nblk) + 5 * Nblk); // guess
  }
  return PETSC_SUCCESS;
}

// KSP solver: solves Ax = b; xout is the output, bin is the input
static PetscErrorCode PCApply_BJKOKKOS(PC pc, Vec bin, Vec xout)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;
  Mat            A = pc->pmat, Aseq = A;
  PetscMPIInt    rank;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)A), &rank));
  if (!A->spptr) Aseq = ((Mat_MPIAIJ *)A->data)->A; // MPI
  PetscCall(MatSeqAIJKokkosSyncDevice(Aseq));
  {
    PetscInt           maxit = jac->ksp->max_it;
    const PetscInt     conc = Kokkos::DefaultExecutionSpace().concurrency(), openmp = !!(conc < 1000), team_size = (openmp == 0 && PCBJKOKKOS_VEC_SIZE != 1) ? PCBJKOKKOS_TEAM_SIZE : 1;
    const PetscInt     nwork = jac->nwork, nBlk = jac->nBlocks;
    PetscScalar       *glb_xdata = NULL, *dummy;
    PetscReal          rtol = jac->ksp->rtol, atol = jac->ksp->abstol, dtol = jac->ksp->divtol;
    const PetscScalar *glb_idiag = jac->d_idiag_k->data(), *glb_bdata = NULL;
    const PetscInt    *glb_Aai, *glb_Aaj, *d_bid_eqOffset = jac->d_bid_eqOffset_k->data();
    const PetscScalar *glb_Aaa;
    const PetscInt    *d_isicol = jac->d_isicol_k->data(), *d_isrow = jac->d_isrow_k->data();
    PCFailedReason     pcreason;
    KSPIndex           ksp_type_idx = jac->ksp_type_idx;
    PetscMemType       mtype;
    PetscContainer     container;
    PetscInt           batch_sz;                // the number of repeated DMs, [DM_e_1, DM_e_2, DM_e_batch_sz, DM_i_1, ...]
    VecScatter         plex_batch = NULL;       // not used
    Vec                bvec;                    // a copy of b for scatter (just alias to bin now)
    PetscBool          monitor  = jac->monitor; // captured
    PetscInt           view_bid = jac->batch_target;
    MatInfo            info;

    PetscCall(MatSeqAIJGetCSRAndMemType(Aseq, &glb_Aai, &glb_Aaj, &dummy, &mtype));
    jac->max_nits = 0;
    glb_Aaa       = dummy;
    if (jac->rank_target != rank) view_bid = -1; // turn off all but one process
    PetscCall(MatGetInfo(A, MAT_LOCAL, &info));
    // get field major is to map plex IO to/from block/field major
    PetscCall(PetscObjectQuery((PetscObject)A, "plex_batch_is", (PetscObject *)&container));
    if (container) {
      PetscCall(VecDuplicate(bin, &bvec));
      PetscCall(PetscContainerGetPointer(container, &plex_batch));
      PetscCall(VecScatterBegin(plex_batch, bin, bvec, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterEnd(plex_batch, bin, bvec, INSERT_VALUES, SCATTER_FORWARD));
      SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_USER, "No plex_batch_is -- require NO field major ordering for now");
    } else {
      bvec = bin;
    }
    // get x
    PetscCall(VecGetArrayAndMemType(xout, &glb_xdata, &mtype));
#if defined(PETSC_HAVE_CUDA)
    PetscCheck(PetscMemTypeDevice(mtype), PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "No GPU data for x %d != %d", static_cast<int>(mtype), static_cast<int>(PETSC_MEMTYPE_DEVICE));
#endif
    PetscCall(VecGetArrayReadAndMemType(bvec, &glb_bdata, &mtype));
#if defined(PETSC_HAVE_CUDA)
    PetscCheck(PetscMemTypeDevice(mtype), PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "No GPU data for b");
#endif
    // get batch size
    PetscCall(PetscObjectQuery((PetscObject)A, "batch size", (PetscObject *)&container));
    if (container) {
      PetscInt *pNf = NULL;
      PetscCall(PetscContainerGetPointer(container, &pNf));
      batch_sz = *pNf; // number of times to repeat the DMs
    } else batch_sz = 1;
    PetscCheck(nBlk % batch_sz == 0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "batch_sz = %" PetscInt_FMT ", nBlk = %" PetscInt_FMT, batch_sz, nBlk);
    if (ksp_type_idx == BATCH_KSP_GMRESKK_IDX) {
      // KK solver - move PETSc data into Kokkos Views, setup solver, solve, move data out of Kokkos, process metadata (convergence tests, etc.)
#if defined(PETSC_HAVE_KOKKOS_KERNELS_BATCH)
      PetscCall(PCApply_BJKOKKOSKERNELS(pc, glb_bdata, glb_xdata, glb_Aai, glb_Aaj, glb_Aaa, team_size, info, batch_sz, &pcreason));
#else
      PetscCheck(ksp_type_idx != BATCH_KSP_GMRESKK_IDX, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "Type: BATCH_KSP_GMRES not supported for complex");
#endif
    } else { // Kokkos Krylov
      using scr_mem_t    = Kokkos::DefaultExecutionSpace::scratch_memory_space;
      using vect2D_scr_t = Kokkos::View<PetscScalar **, Kokkos::LayoutLeft, scr_mem_t>;
      Kokkos::View<Batch_MetaData *, Kokkos::DefaultExecutionSpace> d_metadata("solver meta data", nBlk);
      int                                                           stride_shared, stride_global, global_buff_words;
      d_bid_eqOffset = jac->d_bid_eqOffset_k->data();
      // solve each block independently
      int scr_bytes_team_shared = 0, nShareVec = 0, nGlobBVec = 0;
      if (jac->const_block_size) { // use shared memory for work vectors only if constant block size - TODO: test efficiency loss
        size_t      maximum_shared_mem_size = 64000;
        PetscDevice device;
        PetscCall(PetscDeviceGetDefault_Internal(&device));
        PetscCall(PetscDeviceGetAttribute(device, PETSC_DEVICE_ATTR_SIZE_T_SHARED_MEM_PER_BLOCK, &maximum_shared_mem_size));
        stride_shared = jac->const_block_size;                                                   // captured
        nShareVec     = maximum_shared_mem_size / (jac->const_block_size * sizeof(PetscScalar)); // integer floor, number of vectors that fit in shared
        if (nShareVec > nwork) nShareVec = nwork;
        else nGlobBVec = nwork - nShareVec;
        global_buff_words     = jac->n * nGlobBVec;
        scr_bytes_team_shared = jac->const_block_size * nShareVec * sizeof(PetscScalar);
      } else {
        scr_bytes_team_shared = 0;
        stride_shared         = 0;
        global_buff_words     = jac->n * nwork;
        nGlobBVec             = nwork; // not needed == fix
      }
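      // For illustration only (values not computed here): with 64 KB of shared memory per team,
      // a constant block size of 128, and 8-byte PetscScalar, nShareVec = 65536/(128*8) = 64,
      // which is then capped at nwork; the remaining nGlobBVec vectors come from d_work_vecs.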
      stride_global = jac->n; // captured
#if defined(PETSC_HAVE_CUDA)
      nvtxRangePushA("batch-kokkos-solve");
#endif
      Kokkos::View<PetscScalar *, Kokkos::DefaultExecutionSpace> d_work_vecs_k("workvectors", global_buff_words); // global work vectors
#if PCBJKOKKOS_VERBOSE_LEVEL > 1
      PetscCall(PetscInfo(pc, "\tn = %d. %d shared bytes/team, %d global mem bytes, rtol=%e, num blocks %d, team_size=%d, %d vector threads, %d shared vectors, %d global vectors\n", (int)jac->n, scr_bytes_team_shared, global_buff_words, rtol, (int)nBlk, (int)team_size, PCBJKOKKOS_VEC_SIZE, nShareVec, nGlobBVec));
#endif
      PetscScalar *d_work_vecs = d_work_vecs_k.data();
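      // One team per diagonal block: team blkID solves rows [d_bid_eqOffset[blkID],
      // d_bid_eqOffset[blkID+1]) with the batched BiCG or TFQMR kernel above, using its own
      // slice of the shared and global work space.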
      Kokkos::parallel_for(
        "Solve", Kokkos::TeamPolicy<Kokkos::LaunchBounds<256, 4>>(nBlk, team_size, PCBJKOKKOS_VEC_SIZE).set_scratch_size(PCBJKOKKOS_SHARED_LEVEL, Kokkos::PerTeam(scr_bytes_team_shared)), KOKKOS_LAMBDA(const team_member team) {
          const int    blkID = team.league_rank(), start = d_bid_eqOffset[blkID], end = d_bid_eqOffset[blkID + 1];
          vect2D_scr_t work_vecs_shared(team.team_scratch(PCBJKOKKOS_SHARED_LEVEL), end - start, nShareVec);
          PetscScalar *work_buff_shared = work_vecs_shared.data();
          PetscScalar *work_buff_global = &d_work_vecs[start]; // start inc'ed in
          bool         print            = monitor && (blkID == view_bid);
          switch (ksp_type_idx) {
          case BATCH_KSP_BICG_IDX:
            static_cast<void>(BJSolve_BICG(team, glb_Aai, glb_Aaj, glb_Aaa, d_isrow, d_isicol, work_buff_global, stride_global, nShareVec, work_buff_shared, stride_shared, rtol, atol, dtol, maxit, &d_metadata[blkID], start, end, glb_idiag, glb_bdata, glb_xdata, print));
            break;
          case BATCH_KSP_TFQMR_IDX:
            static_cast<void>(BJSolve_TFQMR(team, glb_Aai, glb_Aaj, glb_Aaa, d_isrow, d_isicol, work_buff_global, stride_global, nShareVec, work_buff_shared, stride_shared, rtol, atol, dtol, maxit, &d_metadata[blkID], start, end, glb_idiag, glb_bdata, glb_xdata, print));
            break;
          default:
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
            printf("Unknown KSP type %d\n", ksp_type_idx);
#else
            /* void */;
#endif
          }
        });
      Kokkos::fence();
#if defined(PETSC_HAVE_CUDA)
      nvtxRangePop();
      nvtxRangePushA("Post-solve-metadata");
#endif
      auto h_metadata = Kokkos::create_mirror(Kokkos::HostSpace::memory_space(), d_metadata);
      Kokkos::deep_copy(h_metadata, d_metadata);
      PetscInt max_nnit = -1;
#if PCBJKOKKOS_VERBOSE_LEVEL > 1
      PetscInt mbid = 0;
#endif
      int in[2], out[2];
      if (jac->reason) { // -pc_bjkokkos_ksp_converged_reason
#if PCBJKOKKOS_VERBOSE_LEVEL >= 3
  #if PCBJKOKKOS_VERBOSE_LEVEL >= 4
        PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Iterations\n"));
  #endif
        // assume species major
  #if PCBJKOKKOS_VERBOSE_LEVEL == 3
        if (batch_sz != 1) PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%s: max iterations per species:", ksp_type_idx == BATCH_KSP_BICG_IDX ? "bicg" : "tfqmr"));
        else PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "    Linear solve converged due to %s iterations ", ksp_type_idx == BATCH_KSP_BICG_IDX ? "bicg" : "tfqmr"));
  #endif
        for (PetscInt dmIdx = 0, head = 0, s = 0; dmIdx < jac->num_dms; dmIdx += batch_sz) {
          for (PetscInt f = 0, idx = head; f < jac->dm_Nf[dmIdx]; f++, idx++, s++) {
            for (int bid = 0; bid < batch_sz; bid++) {
  #if PCBJKOKKOS_VERBOSE_LEVEL >= 4
              jac->max_nits += h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its; // report total number of iterations with high verbose
              if (h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its > max_nnit) {
                max_nnit = h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its;
                mbid     = bid;
              }
  #else
              if (h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its > max_nnit) {
                jac->max_nits = max_nnit = h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its;
                mbid                     = bid;
              }
  #endif
            }
  #if PCBJKOKKOS_VERBOSE_LEVEL >= 4
            PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%2" PetscInt_FMT ":", s));
            for (int bid = 0; bid < batch_sz; bid++) PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%3" PetscInt_FMT " ", h_metadata[idx + bid * jac->dm_Nf[dmIdx]].its));
            PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "\n"));
  #else // == 3
            PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "%3" PetscInt_FMT " ", max_nnit));
  #endif
          }
          head += batch_sz * jac->dm_Nf[dmIdx];
        }
  #if PCBJKOKKOS_VERBOSE_LEVEL == 3
        PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "\n"));
  #endif
#endif
        if (max_nnit == -1) { // < 3
          for (int blkID = 0; blkID < nBlk; blkID++) {
            if (h_metadata[blkID].its > max_nnit) {
              jac->max_nits = max_nnit = h_metadata[blkID].its;
#if PCBJKOKKOS_VERBOSE_LEVEL > 1
              mbid = blkID;
#endif
            }
          }
        }
        in[0] = max_nnit;
        in[1] = rank;
        PetscCallMPI(MPIU_Allreduce(in, out, 1, MPI_2INT, MPI_MAXLOC, PetscObjectComm((PetscObject)A)));
#if PCBJKOKKOS_VERBOSE_LEVEL > 1
        if (0 == rank) {
          if (batch_sz != 1)
            PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Linear solve converged due to %s iterations %d (max), on block %" PetscInt_FMT ", species %" PetscInt_FMT " (max)\n", out[1], KSPConvergedReasons[h_metadata[mbid].reason], out[0], mbid % batch_sz, mbid / batch_sz));
          else PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Linear solve converged due to %s iterations %d (max), on block %" PetscInt_FMT "\n", out[1], KSPConvergedReasons[h_metadata[mbid].reason], out[0], mbid));
        }
#endif
      }
      for (int blkID = 0; blkID < nBlk; blkID++) {
        PetscCall(PetscLogGpuFlops((PetscLogDouble)h_metadata[blkID].flops));
        PetscCheck(h_metadata[blkID].reason >= 0 || !jac->ksp->errorifnotconverged, PetscObjectComm((PetscObject)pc), PETSC_ERR_CONV_FAILED, "ERROR reason=%s, its=%" PetscInt_FMT ". species %" PetscInt_FMT ", batch %" PetscInt_FMT,
                   KSPConvergedReasons[h_metadata[blkID].reason], h_metadata[blkID].its, blkID / batch_sz, blkID % batch_sz);
      }
      {
        int errsum;
        Kokkos::parallel_reduce(
          nBlk,
          KOKKOS_LAMBDA(const int idx, int &lsum) {
            if (d_metadata[idx].reason < 0) ++lsum;
          },
          errsum);
        pcreason = errsum ? PC_SUBPC_ERROR : PC_NOERROR;
        if (!errsum && !jac->max_nits) { // set max its to give back to top KSP
          for (int blkID = 0; blkID < nBlk; blkID++) {
            if (h_metadata[blkID].its > jac->max_nits) jac->max_nits = h_metadata[blkID].its;
          }
        } else if (errsum) {
          PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] ERROR Kokkos batch solver did not converge in all solves\n", (int)rank));
        }
      }
#if defined(PETSC_HAVE_CUDA)
      nvtxRangePop();
#endif
    } // end of Kokkos (not Kernels) solvers block
    PetscCall(VecRestoreArrayAndMemType(xout, &glb_xdata));
    PetscCall(VecRestoreArrayReadAndMemType(bvec, &glb_bdata));
    PetscCall(PCSetFailedReason(pc, pcreason));
    // map back to Plex space - not used
    if (plex_batch) {
      PetscCall(VecCopy(xout, bvec));
      PetscCall(VecScatterBegin(plex_batch, bvec, xout, INSERT_VALUES, SCATTER_REVERSE));
      PetscCall(VecScatterEnd(plex_batch, bvec, xout, INSERT_VALUES, SCATTER_REVERSE));
      PetscCall(VecDestroy(&bvec));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCSetUp_BJKOKKOS(PC pc)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;
  Mat            A = pc->pmat, Aseq = A; // use filtered block matrix, really "P"
  PetscBool      flg;

  PetscFunctionBegin;
  PetscCheck(A, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "No matrix - A is used above");
  PetscCall(PetscObjectTypeCompareAny((PetscObject)A, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, ""));
  PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "must use '-[dm_]mat_type aijkokkos -[dm_]vec_type kokkos' for -pc_type bjkokkos");
  if (!A->spptr) Aseq = ((Mat_MPIAIJ *)A->data)->A; // MPI
  PetscCall(MatSeqAIJKokkosSyncDevice(Aseq));
  {
    PetscInt    Istart, Iend;
    PetscMPIInt rank;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)A), &rank));
    PetscCall(MatGetOwnershipRange(A, &Istart, &Iend));
    if (!jac->vec_diag) {
      Vec     *subX = NULL;
      DM       pack, *subDM = NULL;
      PetscInt nDMs, n, *block_sizes = NULL;
      IS       isrow, isicol;
      { // Permute the matrix to get a block diagonal system: d_isrow_k, d_isicol_k
        MatOrderingType rtype;
        const PetscInt *rowindices, *icolindices;
        rtype = MATORDERINGRCM;
        // get permutation. And invert. should we convert to local indices?
        PetscCall(MatGetOrdering(Aseq, rtype, &isrow, &isicol)); // only seems to work for seq matrix
        PetscCall(ISDestroy(&isrow));
        PetscCall(ISInvertPermutation(isicol, PETSC_DECIDE, &isrow)); // THIS IS BACKWARD -- isrow is inverse
        // if (rank==1) PetscCall(ISView(isicol, PETSC_VIEWER_STDOUT_SELF));
        if (0) {
          Mat mat_block_order; // debug
          PetscCall(ISShift(isicol, Istart, isicol));
          PetscCall(MatCreateSubMatrix(A, isicol, isicol, MAT_INITIAL_MATRIX, &mat_block_order));
          PetscCall(ISShift(isicol, -Istart, isicol));
          PetscCall(MatViewFromOptions(mat_block_order, NULL, "-ksp_batch_reorder_view"));
          PetscCall(MatDestroy(&mat_block_order));
        }
        PetscCall(ISGetIndices(isrow, &rowindices)); // local idx
        PetscCall(ISGetIndices(isicol, &icolindices));
        const Kokkos::View<PetscInt *, Kokkos::HostSpace, Kokkos::MemoryTraits<Kokkos::Unmanaged>> h_isrow_k((PetscInt *)rowindices, A->rmap->n);
        const Kokkos::View<PetscInt *, Kokkos::HostSpace, Kokkos::MemoryTraits<Kokkos::Unmanaged>> h_isicol_k((PetscInt *)icolindices, A->rmap->n);
        jac->d_isrow_k  = new Kokkos::View<PetscInt *>(Kokkos::create_mirror(DefaultMemorySpace(), h_isrow_k));
        jac->d_isicol_k = new Kokkos::View<PetscInt *>(Kokkos::create_mirror(DefaultMemorySpace(), h_isicol_k));
        Kokkos::deep_copy(*jac->d_isrow_k, h_isrow_k);
        Kokkos::deep_copy(*jac->d_isicol_k, h_isicol_k);
        PetscCall(ISRestoreIndices(isrow, &rowindices));
        PetscCall(ISRestoreIndices(isicol, &icolindices));
        // if (rank==1) PetscCall(ISView(isicol, PETSC_VIEWER_STDOUT_SELF));
      }
      // get block sizes & allocate vec_diag
      PetscCall(PCGetDM(pc, &pack));
      if (pack) {
        PetscCall(PetscObjectTypeCompare((PetscObject)pack, DMCOMPOSITE, &flg));
        if (flg) {
          PetscCall(DMCompositeGetNumberDM(pack, &nDMs));
          PetscCall(DMCreateGlobalVector(pack, &jac->vec_diag));
        } else pack = NULL; // flag for no DM
      }
      if (!jac->vec_diag) { // get 'nDMs' and sizes 'block_sizes' w/o DMComposite. TODO: User could provide ISs
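        // Walk the rows in the permuted (RCM) ordering and detect block boundaries: a new block
        // starts when a row's smallest permuted column index lies past the end of the current
        // block, i.e. the reordered matrix is treated as block diagonal.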
        PetscInt        bsrt, bend, ncols, ntot = 0;
        const PetscInt *colsA, nloc = Iend - Istart;
        const PetscInt *rowindices, *icolindices;
        PetscCall(PetscMalloc1(nloc, &block_sizes)); // very inefficient, too big
        PetscCall(ISGetIndices(isrow, &rowindices));
        PetscCall(ISGetIndices(isicol, &icolindices));
        nDMs = 0;
        bsrt = 0;
        bend = 1;
        for (PetscInt row_B = 0; row_B < nloc; row_B++) { // for all rows in block diagonal space
          PetscInt rowA = icolindices[row_B], minj = PETSC_INT_MAX, maxj = 0;
          //PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t[%d] rowA = %d\n",rank,rowA));
          PetscCall(MatGetRow(Aseq, rowA, &ncols, &colsA, NULL)); // not sorted in permutation
          PetscCheck(ncols, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "Empty row not supported: %" PetscInt_FMT, row_B);
          for (PetscInt colj = 0; colj < ncols; colj++) {
            PetscInt colB = rowindices[colsA[colj]]; // use local idx
            //PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t\t[%d] colB = %d\n",rank,colB));
            PetscCheck(colB >= 0 && colB < nloc, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "colB < 0: %" PetscInt_FMT, colB);
            if (colB > maxj) maxj = colB;
            if (colB < minj) minj = colB;
          }
          PetscCall(MatRestoreRow(Aseq, rowA, &ncols, &colsA, NULL));
          if (minj >= bend) { // first column is > max of last block -- new block or last block
            //PetscCall(PetscPrintf(PetscObjectComm((PetscObject)A), "\t\t finish block %d, N loc = %d (%d,%d)\n", nDMs+1, bend - bsrt,bsrt,bend));
            block_sizes[nDMs] = bend - bsrt;
            ntot += block_sizes[nDMs];
            PetscCheck(minj == bend, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "minj != bend: %" PetscInt_FMT " != %" PetscInt_FMT, minj, bend);
            bsrt = bend;
            bend++; // start with size 1 in new block
            nDMs++;
          }
          if (maxj + 1 > bend) bend = maxj + 1;
          PetscCheck(minj >= bsrt || row_B == Iend - 1, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "%" PetscInt_FMT ") minj < bsrt: %" PetscInt_FMT " != %" PetscInt_FMT, rowA, minj, bsrt);
          //PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] %d) row %d.%d) cols %d : %d ; bsrt = %d, bend = %d\n",rank,row_B,nDMs,rowA,minj,maxj,bsrt,bend));
        }
        // do last block
        //PetscCall(PetscPrintf(PETSC_COMM_SELF, "\t\t\t [%d] finish block %d, N loc = %d (%d,%d)\n", rank, nDMs+1, bend - bsrt,bsrt,bend));
        block_sizes[nDMs] = bend - bsrt;
        ntot += block_sizes[nDMs];
        nDMs++;
        // cleanup
        PetscCheck(ntot == nloc, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "n total != n local: %" PetscInt_FMT " != %" PetscInt_FMT, ntot, nloc);
        PetscCall(ISRestoreIndices(isrow, &rowindices));
        PetscCall(ISRestoreIndices(isicol, &icolindices));
        PetscCall(PetscRealloc(sizeof(PetscInt) * nDMs, &block_sizes));
        PetscCall(MatCreateVecs(A, &jac->vec_diag, NULL));
        PetscCall(PetscInfo(pc, "Setup Matrix based meta data (not DMComposite not attached to PC) %" PetscInt_FMT " sub domains\n", nDMs));
      }
      PetscCall(ISDestroy(&isrow));
      PetscCall(ISDestroy(&isicol));
      jac->num_dms = nDMs;
      PetscCall(VecGetLocalSize(jac->vec_diag, &n));
      jac->n         = n;
      jac->d_idiag_k = new Kokkos::View<PetscScalar *, Kokkos::LayoutRight>("idiag", n);
      // options
      PetscCall(PCBJKOKKOSCreateKSP_BJKOKKOS(pc));
      PetscCall(KSPSetFromOptions(jac->ksp));
      PetscCall(PetscObjectTypeCompareAny((PetscObject)jac->ksp, &flg, KSPBICG, ""));
      if (flg) {
        jac->ksp_type_idx = BATCH_KSP_BICG_IDX;
        jac->nwork        = 7;
      } else {
        PetscCall(PetscObjectTypeCompareAny((PetscObject)jac->ksp, &flg, KSPTFQMR, ""));
        if (flg) {
          jac->ksp_type_idx = BATCH_KSP_TFQMR_IDX;
          jac->nwork        = 10;
        } else {
#if defined(PETSC_HAVE_KOKKOS_KERNELS_BATCH)
          PetscCall(PetscObjectTypeCompareAny((PetscObject)jac->ksp, &flg, KSPGMRES, ""));
          PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Unsupported batch ksp type");
          jac->ksp_type_idx = BATCH_KSP_GMRESKK_IDX;
          jac->nwork        = 0;
#else
          KSPType ksptype;
          PetscCall(KSPGetType(jac->ksp, &ksptype));
          PetscCheck(flg, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_WRONG, "Type: %s not supported in complex", ksptype);
#endif
        }
      }
      PetscOptionsBegin(PetscObjectComm((PetscObject)jac->ksp), ((PetscObject)jac->ksp)->prefix, "Options for Kokkos batch solver", "none");
      PetscCall(PetscOptionsBool("-ksp_converged_reason", "", "bjkokkos.kokkos.cxx.c", jac->reason, &jac->reason, NULL));
      PetscCall(PetscOptionsBool("-ksp_monitor", "", "bjkokkos.kokkos.cxx.c", jac->monitor, &jac->monitor, NULL));
      PetscCall(PetscOptionsInt("-ksp_batch_target", "", "bjkokkos.kokkos.cxx.c", jac->batch_target, &jac->batch_target, NULL));
      PetscCall(PetscOptionsInt("-ksp_rank_target", "", "bjkokkos.kokkos.cxx.c", jac->rank_target, &jac->rank_target, NULL));
      PetscCall(PetscOptionsInt("-ksp_batch_nsolves_team", "", "bjkokkos.kokkos.cxx.c", jac->nsolves_team, &jac->nsolves_team, NULL));
      PetscCheck(jac->batch_target < jac->num_dms, PETSC_COMM_WORLD, PETSC_ERR_ARG_WRONG, "-ksp_batch_target (%" PetscInt_FMT ") >= number of DMs (%" PetscInt_FMT ")", jac->batch_target, jac->num_dms);
      PetscOptionsEnd();
      // get blocks - jac->d_bid_eqOffset_k
      if (pack) {
        PetscCall(PetscMalloc(sizeof(*subX) * nDMs, &subX));
        PetscCall(PetscMalloc(sizeof(*subDM) * nDMs, &subDM));
      }
      PetscCall(PetscMalloc(sizeof(*jac->dm_Nf) * nDMs, &jac->dm_Nf));
      PetscCall(PetscInfo(pc, "Have %" PetscInt_FMT " blocks, n=%" PetscInt_FMT " rtol=%g type = %s\n", nDMs, n, (double)jac->ksp->rtol, ((PetscObject)jac->ksp)->type_name));
      if (pack) PetscCall(DMCompositeGetEntriesArray(pack, subDM));
      jac->nBlocks = 0;
      for (PetscInt ii = 0; ii < nDMs; ii++) {
        PetscInt Nf;
        if (subDM) {
          DM           dm = subDM[ii];
          PetscSection section;
          PetscCall(DMGetLocalSection(dm, &section));
          PetscCall(PetscSectionGetNumFields(section, &Nf));
        } else Nf = 1;
        jac->nBlocks += Nf;
#if PCBJKOKKOS_VERBOSE_LEVEL <= 2
        if (ii == 0) PetscCall(PetscInfo(pc, "%" PetscInt_FMT ") %" PetscInt_FMT " blocks (%" PetscInt_FMT " total)\n", ii, Nf, jac->nBlocks));
#else
        PetscCall(PetscInfo(pc, "%" PetscInt_FMT ") %" PetscInt_FMT " blocks (%" PetscInt_FMT " total)\n", ii, Nf, jac->nBlocks));
#endif
        jac->dm_Nf[ii] = Nf;
      }
      { // d_bid_eqOffset_k
        Kokkos::View<PetscInt *, Kokkos::LayoutRight, Kokkos::HostSpace> h_block_offsets("block_offsets", jac->nBlocks + 1);
        if (pack) PetscCall(DMCompositeGetAccessArray(pack, jac->vec_diag, nDMs, NULL, subX));
        h_block_offsets[0]    = 0;
        jac->const_block_size = -1;
        for (PetscInt ii = 0, idx = 0; ii < nDMs; ii++) {
          PetscInt nloc, nblk;
          if (pack) PetscCall(VecGetSize(subX[ii], &nloc));
          else nloc = block_sizes[ii];
          nblk = nloc / jac->dm_Nf[ii];
          PetscCheck(nloc % jac->dm_Nf[ii] == 0, PetscObjectComm((PetscObject)pc), PETSC_ERR_USER, "nloc%%jac->dm_Nf[ii] (%" PetscInt_FMT ") != 0 DMs", nloc % jac->dm_Nf[ii]);
          for (PetscInt jj = 0; jj < jac->dm_Nf[ii]; jj++, idx++) {
            h_block_offsets[idx + 1] = h_block_offsets[idx] + nblk;
#if PCBJKOKKOS_VERBOSE_LEVEL <= 2
            if (idx == 0) PetscCall(PetscInfo(pc, "Add first of %" PetscInt_FMT " blocks with %" PetscInt_FMT " equations\n", jac->nBlocks, nblk));
#else
            PetscCall(PetscInfo(pc, "\t%" PetscInt_FMT ") Add block with %" PetscInt_FMT " equations of %" PetscInt_FMT "\n", idx + 1, nblk, jac->nBlocks));
#endif
            if (jac->const_block_size == -1) jac->const_block_size = nblk;
            else if (jac->const_block_size > 0 && jac->const_block_size != nblk) jac->const_block_size = 0;
          }
        }
        if (pack) {
          PetscCall(DMCompositeRestoreAccessArray(pack, jac->vec_diag, nDMs, NULL, subX));
          PetscCall(PetscFree(subX));
          PetscCall(PetscFree(subDM));
        }
        jac->d_bid_eqOffset_k = new Kokkos::View<PetscInt *, Kokkos::LayoutRight>(Kokkos::create_mirror(Kokkos::DefaultExecutionSpace::memory_space(), h_block_offsets));
        Kokkos::deep_copy(*jac->d_bid_eqOffset_k, h_block_offsets);
      }
      if (!pack) PetscCall(PetscFree(block_sizes));
    }
    { // get jac->d_idiag_k (PC setup),
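      // Fill d_idiag with the inverse diagonal in the permuted (block) numbering: for each block
      // row rowb, find the entry whose permuted column equals rowb and store 1/a_jj; the debug
      // check below verifies that exactly one diagonal entry was found.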
      const PetscInt    *d_ai, *d_aj;
      const PetscScalar *d_aa;
      const PetscInt     conc = Kokkos::DefaultExecutionSpace().concurrency(), openmp = !!(conc < 1000), team_size = (openmp == 0 && PCBJKOKKOS_VEC_SIZE != 1) ? PCBJKOKKOS_TEAM_SIZE : 1;
      const PetscInt    *d_bid_eqOffset = jac->d_bid_eqOffset_k->data(), *r = jac->d_isrow_k->data(), *ic = jac->d_isicol_k->data();
      PetscScalar       *d_idiag = jac->d_idiag_k->data(), *dummy;
      PetscMemType       mtype;
      PetscCall(MatSeqAIJGetCSRAndMemType(Aseq, &d_ai, &d_aj, &dummy, &mtype));
      d_aa = dummy;
      Kokkos::parallel_for(
        "Diag", Kokkos::TeamPolicy<>(jac->nBlocks, team_size, PCBJKOKKOS_VEC_SIZE), KOKKOS_LAMBDA(const team_member team) {
          const PetscInt blkID = team.league_rank();
          Kokkos::parallel_for(Kokkos::TeamThreadRange(team, d_bid_eqOffset[blkID], d_bid_eqOffset[blkID + 1]), [=](const int rowb) {
            const PetscInt     rowa = ic[rowb], ai = d_ai[rowa], *aj = d_aj + ai; // grab original data
            const PetscScalar *aa   = d_aa + ai;
            const PetscInt     nrow = d_ai[rowa + 1] - ai;
            int                found;
            Kokkos::parallel_reduce(
              Kokkos::ThreadVectorRange(team, nrow),
              [=](const int &j, int &count) {
                const PetscInt colb = r[aj[j]];
                if (colb == rowb) {
                  d_idiag[rowb] = 1. / aa[j];
                  count++;
                }
              },
              found);
#if defined(PETSC_USE_DEBUG) && !defined(PETSC_HAVE_SYCL)
            if (found != 1) Kokkos::single(Kokkos::PerThread(team), [=]() { printf("ERRORrow %d) found = %d\n", rowb, found); });
#endif
          });
        });
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Default destroy, if it has never been setup */
static PetscErrorCode PCReset_BJKOKKOS(PC pc)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;

  PetscFunctionBegin;
  PetscCall(KSPDestroy(&jac->ksp));
  PetscCall(VecDestroy(&jac->vec_diag));
  if (jac->d_bid_eqOffset_k) delete jac->d_bid_eqOffset_k;
  if (jac->d_idiag_k) delete jac->d_idiag_k;
  if (jac->d_isrow_k) delete jac->d_isrow_k;
  if (jac->d_isicol_k) delete jac->d_isicol_k;
  jac->d_bid_eqOffset_k = NULL;
  jac->d_idiag_k        = NULL;
  jac->d_isrow_k        = NULL;
  jac->d_isicol_k       = NULL;
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCBJKOKKOSGetKSP_C", NULL)); // not published now (causes configure errors)
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCBJKOKKOSSetKSP_C", NULL));
  PetscCall(PetscFree(jac->dm_Nf));
  jac->dm_Nf = NULL;
  if (jac->rowOffsets) delete jac->rowOffsets;
  if (jac->colIndices) delete jac->colIndices;
  if (jac->batch_b) delete jac->batch_b;
  if (jac->batch_x) delete jac->batch_x;
  if (jac->batch_values) delete jac->batch_values;
  jac->rowOffsets   = NULL;
  jac->colIndices   = NULL;
  jac->batch_b      = NULL;
  jac->batch_x      = NULL;
  jac->batch_values = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCDestroy_BJKOKKOS(PC pc)
{
  PetscFunctionBegin;
  PetscCall(PCReset_BJKOKKOS(pc));
  PetscCall(PetscFree(pc->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCView_BJKOKKOS(PC pc, PetscViewer viewer)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;
  PetscBool      isascii;

  PetscFunctionBegin;
  if (!jac->ksp) PetscCall(PCBJKOKKOSCreateKSP_BJKOKKOS(pc));
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
  if (isascii) {
    PetscCall(PetscViewerASCIIPrintf(viewer, "  Batched device linear solver: Krylov (KSP) method with Jacobi preconditioning\n"));
    PetscCall(PetscViewerASCIIPrintf(viewer, "\t\tnwork = %" PetscInt_FMT ", rel tol = %e, abs tol = %e, div tol = %e, max it =%" PetscInt_FMT ", type = %s\n", jac->nwork, jac->ksp->rtol, jac->ksp->abstol, jac->ksp->divtol, jac->ksp->max_it,
                                     ((PetscObject)jac->ksp)->type_name));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCSetFromOptions_BJKOKKOS(PC pc, PetscOptionItems PetscOptionsObject)
{
  PetscFunctionBegin;
  PetscOptionsHeadBegin(PetscOptionsObject, "PC BJKOKKOS options");
  PetscOptionsHeadEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCBJKOKKOSSetKSP_BJKOKKOS(PC pc, KSP ksp)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;

  PetscFunctionBegin;
  PetscCall(PetscObjectReference((PetscObject)ksp));
  PetscCall(KSPDestroy(&jac->ksp));
  jac->ksp = ksp;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCBJKOKKOSSetKSP - Sets the `KSP` context for `PCBJKOKKOS`

  Collective

  Input Parameters:
+ pc  - the `PCBJKOKKOS` preconditioner context
- ksp - the `KSP` solver

  Level: advanced

  Notes:
  The `PC` and the `KSP` must have the same communicator

  If the `PC` is not `PCBJKOKKOS` this function returns without doing anything

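  Example Usage:
  A sketch (assuming `pc` is already a `PCBJKOKKOS` preconditioner; TFQMR and BiCG are the
  batched solvers implemented in this file, GMRES requires Kokkos Kernels batch support):
.vb
  KSP inner;
  KSPCreate(PetscObjectComm((PetscObject)pc), &inner);
  KSPSetType(inner, KSPTFQMR);
  PCBJKOKKOSSetKSP(pc, inner);
  KSPDestroy(&inner); // the PC keeps its own reference
.ve
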
.seealso: [](ch_ksp), `PCBJKOKKOSGetKSP()`, `PCBJKOKKOS`
@*/
PetscErrorCode PCBJKOKKOSSetKSP(PC pc, KSP ksp)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscValidHeaderSpecific(ksp, KSP_CLASSID, 2);
  PetscCheckSameComm(pc, 1, ksp, 2);
  PetscTryMethod(pc, "PCBJKOKKOSSetKSP_C", (PC, KSP), (pc, ksp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCBJKOKKOSGetKSP_BJKOKKOS(PC pc, KSP *ksp)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;

  PetscFunctionBegin;
  if (!jac->ksp) PetscCall(PCBJKOKKOSCreateKSP_BJKOKKOS(pc));
  *ksp = jac->ksp;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCBJKOKKOSGetKSP - Gets the `KSP` context for the `PCBJKOKKOS` preconditioner

  Not Collective but `KSP` returned is parallel if `PC` was parallel

  Input Parameter:
. pc - the preconditioner context

  Output Parameter:
. ksp - the `KSP` solver

  Level: advanced

  Notes:
  You must call `KSPSetUp()` before calling `PCBJKOKKOSGetKSP()`.

  If the `PC` is not a `PCBJKOKKOS` object it raises an error

.seealso: [](ch_ksp), `PCBJKOKKOS`, `PCBJKOKKOSSetKSP()`
@*/
PetscErrorCode PCBJKOKKOSGetKSP(PC pc, KSP *ksp)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscAssertPointer(ksp, 2);
  PetscUseMethod(pc, "PCBJKOKKOSGetKSP_C", (PC, KSP *), (pc, ksp));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCPostSolve_BJKOKKOS(PC pc, KSP ksp, Vec b, Vec x)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  ksp->its = jac->max_nits;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCPreSolve_BJKOKKOS(PC pc, KSP ksp, Vec b, Vec x)
{
  PC_PCBJKOKKOS *jac = (PC_PCBJKOKKOS *)pc->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  jac->ksp->errorifnotconverged = ksp->errorifnotconverged;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
     PCBJKOKKOS - A batched Krylov/block Jacobi solver that runs a solve of each diagonal block of a block diagonal `MATSEQAIJ` in a Kokkos thread group

   Options Database Key:
.  -pc_bjkokkos_ - options prefix for its `KSP` options

   Level: intermediate

   Note:
   For use with `-ksp_type preonly` to bypass any computation on the CPU
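
   Example usage (a sketch; exact option names depend on the application's prefixes):
.vb
   -ksp_type preonly -pc_type bjkokkos -pc_bjkokkos_ksp_type tfqmr -dm_mat_type aijkokkos -dm_vec_type kokkos
.ve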

   Developer Notes:
   The entire Krylov solve (TFQMR or BICG) with diagonal preconditioning for each block of a block diagonal matrix runs in a Kokkos thread group (e.g., one block per SM on NVIDIA). It supports taking a non-block diagonal matrix, but this is not tested. One should create an explicit block diagonal matrix and use that as the matrix for constructing the preconditioner in the outer `KSP` solver. Variable block sizes are supported and tested in src/ts/utils/dmplexlandau/tutorials/ex[1|2].c

.seealso: [](ch_ksp), `PCCreate()`, `PCSetType()`, `PCType`, `PC`, `PCBJACOBI`,
          `PCSHELL`, `PCCOMPOSITE`, `PCSetUseAmat()`, `PCBJKOKKOSGetKSP()`
M*/

PETSC_EXTERN PetscErrorCode PCCreate_BJKOKKOS(PC pc)
{
  PC_PCBJKOKKOS *jac;

  PetscFunctionBegin;
  PetscCall(PetscNew(&jac));
  pc->data = (void *)jac;

  jac->ksp              = NULL;
  jac->vec_diag         = NULL;
  jac->d_bid_eqOffset_k = NULL;
  jac->d_idiag_k        = NULL;
  jac->d_isrow_k        = NULL;
  jac->d_isicol_k       = NULL;
  jac->nBlocks          = 1;
  jac->max_nits         = 0;

  PetscCall(PetscMemzero(pc->ops, sizeof(struct _PCOps)));
  pc->ops->apply          = PCApply_BJKOKKOS;
  pc->ops->applytranspose = NULL;
  pc->ops->setup          = PCSetUp_BJKOKKOS;
  pc->ops->reset          = PCReset_BJKOKKOS;
  pc->ops->destroy        = PCDestroy_BJKOKKOS;
  pc->ops->setfromoptions = PCSetFromOptions_BJKOKKOS;
  pc->ops->view           = PCView_BJKOKKOS;
  pc->ops->postsolve      = PCPostSolve_BJKOKKOS;
  pc->ops->presolve       = PCPreSolve_BJKOKKOS;

  jac->rowOffsets   = NULL;
  jac->colIndices   = NULL;
  jac->batch_b      = NULL;
  jac->batch_x      = NULL;
  jac->batch_values = NULL;

  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCBJKOKKOSGetKSP_C", PCBJKOKKOSGetKSP_BJKOKKOS));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCBJKOKKOSSetKSP_C", PCBJKOKKOSSetKSP_BJKOKKOS));
  PetscFunctionReturn(PETSC_SUCCESS);
}