// xref: /libCEED/include/ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points.h (revision 9e1d4b8291fc4f4e19d20cdfdecd866260d0e6d2)
// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for CUDA tensor product basis with AtPoints evaluation
#include <ceed/types.h>

#include "cuda-shared-basis-read-write-templates.h"
#include "cuda-shared-basis-tensor-at-points-templates.h"
#include "cuda-shared-basis-tensor-templates.h"

//------------------------------------------------------------------------------
// Tensor Basis Kernels AtPoints
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------
// Interp
//------------------------------------------------------------------------------
// Interpolate element nodal values to arbitrary points.
//
// d_U: element nodal values, strided per component (BASIS_P_1D^dim nodes/elem)
// d_X: reference coordinates of the points, indexed [dim][elem * pts + p]
// d_V: interpolated values, indexed [comp][elem * pts + p]
// points_per_elem: unused here — every one of the BASIS_NUM_PTS slots per
//                  element is written, including padding slots; downstream
//                  consumers are expected to mask the padding.
//
// Each z-slab of the block processes one element; the grid strides over
// elements in blockIdx.x. NOTE(review): assumes the launcher sizes
// blockDim.x/y to match the tensor templates (T_1D) and provides
// blockDim.z * T_1D * (BASIS_DIM > 1 ? T_1D : 1) CeedScalars of dynamic
// shared memory — confirm against the host launch code.
extern "C" __global__ void InterpAtPoints(const CeedInt num_elem, const CeedScalar *__restrict__ c_B, const CeedInt *__restrict__ points_per_elem,
                                          const CeedScalar *__restrict__ d_X, const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V) {
  extern __shared__ CeedScalar slice[];

  SharedData_Cuda data;
  data.t_id_x = threadIdx.x;
  data.t_id_y = threadIdx.y;
  data.t_id_z = threadIdx.z;
  data.t_id   = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x;
  // Per-z-slab window into the dynamic shared-memory scratch space
  data.slice  = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);

  CeedScalar r_U[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];  // nodal values
  CeedScalar r_C[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];  // coefficients
  CeedScalar r_V[BASIS_NUM_COMP];                                     // values at one point

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x * blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x * blockDim.z) {
    // Map nodal values to coefficients
    if (BASIS_DIM == 1) {
      ReadElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * num_elem, BASIS_P_1D, d_U, r_U);
      Interp1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    } else if (BASIS_DIM == 2) {
      ReadElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * num_elem, BASIS_P_1D * BASIS_P_1D, d_U, r_U);
      InterpTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    } else if (BASIS_DIM == 3) {
      ReadElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * BASIS_P_1D * num_elem,
                                                       BASIS_P_1D * BASIS_P_1D * BASIS_P_1D, d_U, r_U);
      InterpTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    }

    // Map to points.
    // Round the point count up to a full x-y thread slab with exact integer
    // ceil-division (replaces double-precision ceil() + implicit conversion)
    // so every thread runs the same number of iterations; padded iterations
    // wrap p back into range and are masked on the final store.
    const CeedInt num_xy_threads = blockDim.x * blockDim.y;
    const CeedInt bound          = ((BASIS_NUM_PTS + num_xy_threads - 1) / num_xy_threads) * num_xy_threads;

    for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < bound; i += num_xy_threads) {
      const CeedInt p = i % BASIS_NUM_PTS;
      CeedScalar    r_X[BASIS_DIM];

      // Gather the point's reference coordinates, one component per dimension
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        r_X[d] = d_X[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * d + p];
      }
      if (BASIS_DIM == 1) {
        InterpAtPoints1d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      } else if (BASIS_DIM == 2) {
        InterpAtPoints2d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      } else if (BASIS_DIM == 3) {
        InterpAtPoints3d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      }
      // Only in-range point indices write out; padded lanes discard r_V
      for (CeedInt j = 0; j < BASIS_NUM_COMP; j++) {
        if (i < BASIS_NUM_PTS) d_V[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * j + i] = r_V[j];
      }
    }
  }
}
// Transpose interpolation: accumulate values at arbitrary points back onto
// element nodes.
//
// d_U: values at points, indexed [comp][elem * pts + p]; only the first
//      points_per_elem[elem] points of each element are read, the rest are
//      treated as zero padding
// d_X: reference coordinates of the points, indexed [dim][elem * pts + p]
// d_V: element nodal values, summed into (not overwritten) via
//      SumElementStrided*
//
// Each z-slab of the block processes one element; the grid strides over
// elements in blockIdx.x. NOTE(review): assumes the same launch configuration
// and dynamic shared-memory size as InterpAtPoints — confirm against the host
// launch code.
extern "C" __global__ void InterpTransposeAtPoints(const CeedInt num_elem, const CeedScalar *__restrict__ c_B,
                                                   const CeedInt *__restrict__ points_per_elem, const CeedScalar *__restrict__ d_X,
                                                   const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V) {
  extern __shared__ CeedScalar slice[];

  SharedData_Cuda data;
  data.t_id_x = threadIdx.x;
  data.t_id_y = threadIdx.y;
  data.t_id_z = threadIdx.z;
  data.t_id   = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x;
  // Per-z-slab window into the dynamic shared-memory scratch space
  data.slice  = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);

  CeedScalar r_U[BASIS_NUM_COMP];                                     // values at one point
  CeedScalar r_C[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];  // accumulated coefficients
  CeedScalar r_V[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];  // nodal values

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x * blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x * blockDim.z) {
    // Clear the coefficient accumulator for this element
    for (CeedInt i = 0; i < BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1); i++) r_C[i] = 0.0;

    // Map from points.
    // Round the point count up to a full x-y thread slab with exact integer
    // ceil-division (replaces double-precision ceil() + implicit conversion)
    // so every thread runs the same number of iterations; padded lanes feed
    // zeros into the transpose so they contribute nothing.
    const CeedInt num_xy_threads = blockDim.x * blockDim.y;
    const CeedInt bound          = ((BASIS_NUM_PTS + num_xy_threads - 1) / num_xy_threads) * num_xy_threads;

    for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < bound; i += num_xy_threads) {
      const CeedInt p = i % BASIS_NUM_PTS;
      CeedScalar    r_X[BASIS_DIM];

      // Gather the point's reference coordinates, one component per dimension
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        r_X[d] = d_X[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * d + p];
      }
      // Zero out padding points beyond this element's actual point count
      for (CeedInt j = 0; j < BASIS_NUM_COMP; j++) {
        if (i < points_per_elem[elem]) r_U[j] = d_U[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * j + p];
        else r_U[j] = 0.0;
      }
      if (BASIS_DIM == 1) {
        InterpTransposeAtPoints1d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      } else if (BASIS_DIM == 2) {
        InterpTransposeAtPoints2d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      } else if (BASIS_DIM == 3) {
        InterpTransposeAtPoints3d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      }
    }
    // NOTE(review): barrier before the tensor-transpose templates below —
    // presumably guards reuse of data.slice between the two phases; confirm
    // against the template implementations
    __syncthreads();

    // Map coefficients back to nodal values and sum into the output
    if (BASIS_DIM == 1) {
      InterpTranspose1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * num_elem, BASIS_P_1D, r_V, d_V);
    } else if (BASIS_DIM == 2) {
      InterpTransposeTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * num_elem, BASIS_P_1D * BASIS_P_1D, r_V, d_V);
    } else if (BASIS_DIM == 3) {
      InterpTransposeTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * BASIS_P_1D * num_elem,
                                                      BASIS_P_1D * BASIS_P_1D * BASIS_P_1D, r_V, d_V);
    }
  }
}

//------------------------------------------------------------------------------
// Grad
//------------------------------------------------------------------------------
// Evaluate gradients of element nodal values at arbitrary points.
//
// d_U: element nodal values, strided per component (BASIS_P_1D^dim nodes/elem)
// d_X: reference coordinates of the points, indexed [dim][elem * pts + p]
// d_V: gradients at points, BASIS_NUM_COMP * BASIS_DIM output fields,
//      indexed [comp*dim][elem * pts + p]
// points_per_elem: unused here — every one of the BASIS_NUM_PTS slots per
//                  element is written, including padding slots; downstream
//                  consumers are expected to mask the padding.
//
// Each z-slab of the block processes one element; the grid strides over
// elements in blockIdx.x. NOTE(review): assumes the same launch configuration
// and dynamic shared-memory size as InterpAtPoints — confirm against the host
// launch code.
extern "C" __global__ void GradAtPoints(const CeedInt num_elem, const CeedScalar *__restrict__ c_B, const CeedInt *__restrict__ points_per_elem,
                                        const CeedScalar *__restrict__ d_X, const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V) {
  extern __shared__ CeedScalar slice[];

  SharedData_Cuda data;
  data.t_id_x = threadIdx.x;
  data.t_id_y = threadIdx.y;
  data.t_id_z = threadIdx.z;
  data.t_id   = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x;
  // Per-z-slab window into the dynamic shared-memory scratch space
  data.slice  = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);

  CeedScalar r_U[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];  // nodal values
  CeedScalar r_C[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];  // coefficients
  CeedScalar r_V[BASIS_NUM_COMP * BASIS_DIM];                         // gradient at one point

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x * blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x * blockDim.z) {
    // Map nodal values to coefficients
    if (BASIS_DIM == 1) {
      ReadElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * num_elem, BASIS_P_1D, d_U, r_U);
      Interp1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    } else if (BASIS_DIM == 2) {
      ReadElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * num_elem, BASIS_P_1D * BASIS_P_1D, d_U, r_U);
      InterpTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    } else if (BASIS_DIM == 3) {
      ReadElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * BASIS_P_1D * num_elem,
                                                       BASIS_P_1D * BASIS_P_1D * BASIS_P_1D, d_U, r_U);
      InterpTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, c_B, r_C);
    }

    // Map to points.
    // Round the point count up to a full x-y thread slab with exact integer
    // ceil-division (replaces double-precision ceil() + implicit conversion)
    // so every thread runs the same number of iterations; padded iterations
    // wrap p back into range and are masked on the final store.
    const CeedInt num_xy_threads = blockDim.x * blockDim.y;
    const CeedInt bound          = ((BASIS_NUM_PTS + num_xy_threads - 1) / num_xy_threads) * num_xy_threads;

    for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < bound; i += num_xy_threads) {
      const CeedInt p = i % BASIS_NUM_PTS;
      CeedScalar    r_X[BASIS_DIM];

      // Gather the point's reference coordinates, one component per dimension
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        r_X[d] = d_X[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * d + p];
      }
      if (BASIS_DIM == 1) {
        GradAtPoints1d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      } else if (BASIS_DIM == 2) {
        GradAtPoints2d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      } else if (BASIS_DIM == 3) {
        GradAtPoints3d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_C, r_X, r_V);
      }
      // Only in-range point indices write out; padded lanes discard r_V
      for (CeedInt j = 0; j < BASIS_NUM_COMP * BASIS_DIM; j++) {
        if (i < BASIS_NUM_PTS) d_V[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * j + i] = r_V[j];
      }
    }
  }
}
// Transpose gradient: accumulate gradient values at arbitrary points back
// onto element nodes.
//
// d_U: gradient values at points, BASIS_NUM_COMP * BASIS_DIM input fields,
//      indexed [comp*dim][elem * pts + p]; only the first
//      points_per_elem[elem] points of each element are read, the rest are
//      treated as zero padding
// d_X: reference coordinates of the points, indexed [dim][elem * pts + p]
// d_V: element nodal values, summed into (not overwritten) via
//      SumElementStrided*
//
// Each z-slab of the block processes one element; the grid strides over
// elements in blockIdx.x. NOTE(review): assumes the same launch configuration
// and dynamic shared-memory size as InterpAtPoints — confirm against the host
// launch code.
extern "C" __global__ void GradTransposeAtPoints(const CeedInt num_elem, const CeedScalar *__restrict__ c_B,
                                                 const CeedInt *__restrict__ points_per_elem, const CeedScalar *__restrict__ d_X,
                                                 const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V) {
  extern __shared__ CeedScalar slice[];

  SharedData_Cuda data;
  data.t_id_x = threadIdx.x;
  data.t_id_y = threadIdx.y;
  data.t_id_z = threadIdx.z;
  data.t_id   = threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x;
  // Per-z-slab window into the dynamic shared-memory scratch space
  data.slice  = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);

  CeedScalar r_U[BASIS_NUM_COMP * BASIS_DIM];                         // gradient at one point
  CeedScalar r_C[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];  // accumulated coefficients
  CeedScalar r_V[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];  // nodal values

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x * blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x * blockDim.z) {
    // Clear the coefficient accumulator for this element
    for (CeedInt i = 0; i < BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1); i++) r_C[i] = 0.0;

    // Map from points.
    // Round the point count up to a full x-y thread slab with exact integer
    // ceil-division (replaces double-precision ceil() + implicit conversion)
    // so every thread runs the same number of iterations; padded lanes feed
    // zeros into the transpose so they contribute nothing.
    const CeedInt num_xy_threads = blockDim.x * blockDim.y;
    const CeedInt bound          = ((BASIS_NUM_PTS + num_xy_threads - 1) / num_xy_threads) * num_xy_threads;

    for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < bound; i += num_xy_threads) {
      const CeedInt p = i % BASIS_NUM_PTS;
      CeedScalar    r_X[BASIS_DIM];

      // Gather the point's reference coordinates, one component per dimension
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        r_X[d] = d_X[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * d + p];
      }
      // Zero out padding points beyond this element's actual point count
      for (CeedInt j = 0; j < BASIS_NUM_COMP * BASIS_DIM; j++) {
        if (i < points_per_elem[elem]) r_U[j] = d_U[elem * BASIS_NUM_PTS + num_elem * BASIS_NUM_PTS * j + p];
        else r_U[j] = 0.0;
      }
      if (BASIS_DIM == 1) {
        GradTransposeAtPoints1d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      } else if (BASIS_DIM == 2) {
        GradTransposeAtPoints2d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      } else if (BASIS_DIM == 3) {
        GradTransposeAtPoints3d<BASIS_NUM_COMP, BASIS_NUM_PTS, BASIS_Q_1D>(data, i, r_U, r_X, r_C);
      }
    }
    // NOTE(review): barrier before the tensor-transpose templates below —
    // presumably guards reuse of data.slice between the two phases; confirm
    // against the template implementations
    __syncthreads();

    // Map coefficients back to nodal values and sum into the output
    if (BASIS_DIM == 1) {
      InterpTranspose1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * num_elem, BASIS_P_1D, r_V, d_V);
    } else if (BASIS_DIM == 2) {
      InterpTransposeTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * num_elem, BASIS_P_1D * BASIS_P_1D, r_V, d_V);
    } else if (BASIS_DIM == 3) {
      InterpTransposeTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_C, c_B, r_V);
      SumElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D * BASIS_P_1D * BASIS_P_1D * num_elem,
                                                      BASIS_P_1D * BASIS_P_1D * BASIS_P_1D, r_V, d_V);
    }
  }
}
253