// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for HIP backend macro and type definitions for JiT source
#include <ceed/types.h>
//------------------------------------------------------------------------------
// Load matrices for basis actions
//------------------------------------------------------------------------------
// Copy the P x Q basis matrix d_B from global memory into the buffer B,
// cooperatively: every thread in the block strides over the entries.
template <int P, int Q>
inline __device__ void loadMatrix(SharedData_Hip &data, const CeedScalar *__restrict__ d_B, CeedScalar *B) {
  const CeedInt stride = blockDim.x * blockDim.y * blockDim.z;

  for (CeedInt i = data.t_id; i < P * Q; i += stride) B[i] = d_B[i];
}
19 
//------------------------------------------------------------------------------
// 1D
//------------------------------------------------------------------------------
23 
//------------------------------------------------------------------------------
// L-vector -> E-vector, offsets provided
//------------------------------------------------------------------------------
// Gather this element's nodal values into registers r_u, one thread per 1D
// node, locating each node in d_u through the indirection array `indices`.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void readDofsOffset1d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                        const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  const CeedInt node = data.t_id_x;

  if (node >= P_1d) return;  // threads beyond the element width do nothing

  const CeedInt ind = indices[node + elem * P_1d];

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + COMP_STRIDE * comp];
}
37 
//------------------------------------------------------------------------------
// L-vector -> E-vector, strided
//------------------------------------------------------------------------------
// Gather this element's nodal values into registers r_u, addressing d_u
// directly via compile-time strides (no indirection array).
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided1d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  const CeedInt node = data.t_id_x;

  if (node >= P_1d) return;  // threads beyond the element width do nothing

  const CeedInt ind = elem * STRIDES_ELEM + node * STRIDES_NODE;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + comp * STRIDES_COMP];
}
50 
//------------------------------------------------------------------------------
// E-vector -> L-vector, offsets provided
//------------------------------------------------------------------------------
// Scatter-add register values r_v back into d_v through `indices`. Different
// elements may index the same L-vector entry, so accumulation uses atomicAdd.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void writeDofsOffset1d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                         const CeedScalar *__restrict__ r_v, CeedScalar *__restrict__ d_v) {
  const CeedInt node = data.t_id_x;

  if (node >= P_1d) return;  // threads beyond the element width do nothing

  const CeedInt ind = indices[node + elem * P_1d];

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) atomicAdd(&d_v[ind + COMP_STRIDE * comp], r_v[comp]);
}
64 
//------------------------------------------------------------------------------
// E-vector -> L-vector, strided
//------------------------------------------------------------------------------
// Scatter-add register values r_v back into d_v at strided locations. A plain
// += (no atomic) is used — the strided layout is presumed to give each
// (node, comp, elem) triple a unique destination.
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided1d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ r_v,
                                          CeedScalar *__restrict__ d_v) {
  const CeedInt node = data.t_id_x;

  if (node >= P_1d) return;  // threads beyond the element width do nothing

  const CeedInt ind = elem * STRIDES_ELEM + node * STRIDES_NODE;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) d_v[ind + comp * STRIDES_COMP] += r_v[comp];
}
78 
//------------------------------------------------------------------------------
// 2D
//------------------------------------------------------------------------------
82 
//------------------------------------------------------------------------------
// L-vector -> E-vector, offsets provided
//------------------------------------------------------------------------------
// Gather this element's nodal values into registers r_u, one thread per 2D
// node (t_id_x fastest), locating each node in d_u through `indices`.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void readDofsOffset2d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                        const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * P_1d;
  const CeedInt ind  = indices[node + elem * P_1d * P_1d];

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + COMP_STRIDE * comp];
}
96 
//------------------------------------------------------------------------------
// L-vector -> E-vector, strided
//------------------------------------------------------------------------------
// Gather this element's nodal values into registers r_u, addressing d_u
// directly via compile-time strides (no indirection array).
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided2d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * P_1d;
  const CeedInt ind  = elem * STRIDES_ELEM + node * STRIDES_NODE;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + comp * STRIDES_COMP];
}
109 
//------------------------------------------------------------------------------
// E-vector -> L-vector, offsets provided
//------------------------------------------------------------------------------
// Scatter-add register values r_v back into d_v through `indices`. Different
// elements may index the same L-vector entry, so accumulation uses atomicAdd.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void writeDofsOffset2d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                         const CeedScalar *__restrict__ r_v, CeedScalar *__restrict__ d_v) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * P_1d;
  const CeedInt ind  = indices[node + elem * P_1d * P_1d];

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) atomicAdd(&d_v[ind + COMP_STRIDE * comp], r_v[comp]);
}
123 
//------------------------------------------------------------------------------
// E-vector -> L-vector, strided
//------------------------------------------------------------------------------
// Scatter-add register values r_v back into d_v at strided locations. A plain
// += (no atomic) is used — the strided layout is presumed to give each
// (node, comp, elem) triple a unique destination.
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided2d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ r_v,
                                          CeedScalar *__restrict__ d_v) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * P_1d;
  const CeedInt ind  = elem * STRIDES_ELEM + node * STRIDES_NODE;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) d_v[ind + comp * STRIDES_COMP] += r_v[comp];
}
137 
//------------------------------------------------------------------------------
// 3D
//------------------------------------------------------------------------------
141 
//------------------------------------------------------------------------------
// L-vector -> E-vector, offsets provided
//------------------------------------------------------------------------------
// TODO: remove "Dofs" and "Quads" in the following function names?
//   - readDofsOffset3d -> readOffset3d ?
//   - readDofsStrided3d -> readStrided3d ?
//   - readSliceQuadsOffset3d -> readSliceOffset3d ?
//   - readSliceQuadsStrided3d -> readSliceStrided3d ?
//   - writeDofsOffset3d -> writeOffset3d ?
//   - writeDofsStrided3d -> writeStrided3d ?
// Gather this element's nodal values into registers r_u. Each (t_id_x, t_id_y)
// thread walks the z column; registers are laid out z-fastest, then component.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void readDofsOffset3d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                        const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  for (CeedInt z = 0; z < P_1d; z++) {
    const CeedInt node = data.t_id_x + data.t_id_y * P_1d + z * P_1d * P_1d;
    const CeedInt ind  = indices[node + elem * P_1d * P_1d * P_1d];

    for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[z + comp * P_1d] = d_u[ind + COMP_STRIDE * comp];
  }
}
163 
//------------------------------------------------------------------------------
// L-vector -> E-vector, strided
//------------------------------------------------------------------------------
// Gather this element's nodal values into registers r_u via compile-time
// strides. Each (t_id_x, t_id_y) thread walks the z column; registers are
// laid out z-fastest, then component.
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided3d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  for (CeedInt z = 0; z < P_1d; z++) {
    const CeedInt node = data.t_id_x + data.t_id_y * P_1d + z * P_1d * P_1d;
    const CeedInt ind  = elem * STRIDES_ELEM + node * STRIDES_NODE;

    for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[z + comp * P_1d] = d_u[ind + comp * STRIDES_COMP];
  }
}
177 
//------------------------------------------------------------------------------
// E-vector -> Q-vector, offsets provided
//------------------------------------------------------------------------------
// Read one z = q slice of quadrature-point values into registers r_u, locating
// each point in d_u through the indirection array `indices`.
// Note: nquads is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int Q_1d>
inline __device__ void readSliceQuadsOffset3d(SharedData_Hip &data, const CeedInt nquads, const CeedInt elem, const CeedInt q,
                                              const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ d_u, CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= Q_1d || data.t_id_y >= Q_1d) return;  // threads beyond the slice footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * Q_1d + q * Q_1d * Q_1d;
  const CeedInt ind  = indices[node + elem * Q_1d * Q_1d * Q_1d];

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + COMP_STRIDE * comp];
}
191 
//------------------------------------------------------------------------------
// E-vector -> Q-vector, strided
//------------------------------------------------------------------------------
// Read one z = q slice of quadrature-point values into registers r_u,
// addressing d_u directly via compile-time strides (no indirection).
template <int NUM_COMP, int Q_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readSliceQuadsStrided3d(SharedData_Hip &data, const CeedInt elem, const CeedInt q, const CeedScalar *__restrict__ d_u,
                                               CeedScalar *__restrict__ r_u) {
  if (data.t_id_x >= Q_1d || data.t_id_y >= Q_1d) return;  // threads beyond the slice footprint do nothing

  const CeedInt node = data.t_id_x + data.t_id_y * Q_1d + q * Q_1d * Q_1d;
  const CeedInt ind  = elem * STRIDES_ELEM + node * STRIDES_NODE;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) r_u[comp] = d_u[ind + comp * STRIDES_COMP];
}
205 
//------------------------------------------------------------------------------
// E-vector -> L-vector, offsets provided
//------------------------------------------------------------------------------
// Scatter-add register values r_v (z-fastest, then component) back into d_v
// through `indices`. Different elements may index the same L-vector entry,
// so accumulation uses atomicAdd.
// Note: num_nodes is unused here; it is kept for a uniform call interface.
template <int NUM_COMP, int COMP_STRIDE, int P_1d>
inline __device__ void writeDofsOffset3d(SharedData_Hip &data, const CeedInt num_nodes, const CeedInt elem, const CeedInt *__restrict__ indices,
                                         const CeedScalar *__restrict__ r_v, CeedScalar *__restrict__ d_v) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  for (CeedInt z = 0; z < P_1d; z++) {
    const CeedInt node = data.t_id_x + data.t_id_y * P_1d + z * P_1d * P_1d;
    const CeedInt ind  = indices[node + elem * P_1d * P_1d * P_1d];

    for (CeedInt comp = 0; comp < NUM_COMP; comp++) atomicAdd(&d_v[ind + COMP_STRIDE * comp], r_v[z + comp * P_1d]);
  }
}
220 
//------------------------------------------------------------------------------
// E-vector -> L-vector, strided
//------------------------------------------------------------------------------
// Scatter-add register values r_v (z-fastest, then component) back into d_v at
// strided locations. A plain += (no atomic) is used — the strided layout is
// presumed to give each (node, comp, elem) triple a unique destination.
template <int NUM_COMP, int P_1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided3d(SharedData_Hip &data, const CeedInt elem, const CeedScalar *__restrict__ r_v,
                                          CeedScalar *__restrict__ d_v) {
  if (data.t_id_x >= P_1d || data.t_id_y >= P_1d) return;  // threads beyond the element footprint do nothing

  for (CeedInt z = 0; z < P_1d; z++) {
    const CeedInt node = data.t_id_x + data.t_id_y * P_1d + z * P_1d * P_1d;
    const CeedInt ind  = elem * STRIDES_ELEM + node * STRIDES_NODE;

    for (CeedInt comp = 0; comp < NUM_COMP; comp++) d_v[ind + comp * STRIDES_COMP] += r_v[z + comp * P_1d];
  }
}
235 
//------------------------------------------------------------------------------
// 3D collocated derivatives computation
//------------------------------------------------------------------------------
// Compute the X/Y/Z derivatives at quadrature slice q from collocated values
// r_U (z-fastest, then component) by contracting with the 1D collocated
// gradient matrix c_G. The X and Y contractions exchange data across threads
// through data.slice; the Z contraction uses only this thread's registers.
//
// Fix: the original executed __syncthreads() inside the `t_id < Q_1d` guard.
// If the block contains threads with t_id_x/t_id_y >= Q_1d (the slice is
// indexed with pitch T_1D, so the block may be wider than Q_1d), those threads
// skipped the barrier — a divergent barrier is undefined behavior on HIP/CUDA.
// Barriers are now reached unconditionally by every thread in the block.
template <int NUM_COMP, int Q_1d>
inline __device__ void gradCollo3d(SharedData_Hip &data, const CeedInt q, const CeedScalar *__restrict__ r_U, const CeedScalar *c_G,
                                   CeedScalar *__restrict__ r_V) {
  const bool active = data.t_id_x < Q_1d && data.t_id_y < Q_1d;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) {
    // Publish this component's slice-q values so neighbors can contract them
    if (active) data.slice[data.t_id_x + data.t_id_y * T_1D] = r_U[q + comp * Q_1d];
    __syncthreads();
    if (active) {
      // X derivative
      r_V[comp + 0 * NUM_COMP] = 0.0;
      for (CeedInt i = 0; i < Q_1d; i++)
        r_V[comp + 0 * NUM_COMP] += c_G[i + data.t_id_x * Q_1d] * data.slice[i + data.t_id_y * T_1D];  // Contract x direction (X derivative)
      // Y derivative
      r_V[comp + 1 * NUM_COMP] = 0.0;
      for (CeedInt i = 0; i < Q_1d; i++)
        r_V[comp + 1 * NUM_COMP] += c_G[i + data.t_id_y * Q_1d] * data.slice[data.t_id_x + i * T_1D];  // Contract y direction (Y derivative)
      // Z derivative (registers only; no shared-memory exchange needed)
      r_V[comp + 2 * NUM_COMP] = 0.0;
      for (CeedInt i = 0; i < Q_1d; i++) r_V[comp + 2 * NUM_COMP] += c_G[i + q * Q_1d] * r_U[i + comp * Q_1d];  // Contract z direction (Z derivative)
    }
    // Protect data.slice before the next component overwrites it
    __syncthreads();
  }
}
261 
//------------------------------------------------------------------------------
// 3D collocated derivatives transpose
//------------------------------------------------------------------------------
// Apply the transpose of gradCollo3d: accumulate the X/Y/Z derivative
// contributions from r_V... er, from r_U (comp-fastest derivative layout) into
// r_V at slice q, contracting with the transposed 1D gradient matrix c_G. X
// and Y exchange data through data.slice; Z is a register-only partial
// contraction across slices.
//
// Fix: the original executed __syncthreads() inside the `t_id < Q_1d` guard.
// If the block contains threads with t_id_x/t_id_y >= Q_1d (the slice is
// indexed with pitch T_1D, so the block may be wider than Q_1d), those threads
// skipped the barrier — a divergent barrier is undefined behavior on HIP/CUDA.
// Barriers are now reached unconditionally by every thread in the block.
template <int NUM_COMP, int Q_1d>
inline __device__ void gradColloTranspose3d(SharedData_Hip &data, const CeedInt q, const CeedScalar *__restrict__ r_U, const CeedScalar *c_G,
                                            CeedScalar *__restrict__ r_V) {
  const bool active = data.t_id_x < Q_1d && data.t_id_y < Q_1d;

  for (CeedInt comp = 0; comp < NUM_COMP; comp++) {
    // X derivative
    if (active) data.slice[data.t_id_x + data.t_id_y * T_1D] = r_U[comp + 0 * NUM_COMP];
    __syncthreads();
    if (active) {
      for (CeedInt i = 0; i < Q_1d; i++)
        r_V[q + comp * Q_1d] += c_G[data.t_id_x + i * Q_1d] * data.slice[i + data.t_id_y * T_1D];  // Contract x direction (X derivative)
    }
    __syncthreads();
    // Y derivative
    if (active) data.slice[data.t_id_x + data.t_id_y * T_1D] = r_U[comp + 1 * NUM_COMP];
    __syncthreads();
    if (active) {
      for (CeedInt i = 0; i < Q_1d; i++)
        r_V[q + comp * Q_1d] += c_G[data.t_id_y + i * Q_1d] * data.slice[data.t_id_x + i * T_1D];  // Contract y direction (Y derivative)
      // Z derivative (registers only; safe before the barrier since it does not touch data.slice)
      for (CeedInt i = 0; i < Q_1d; i++)
        r_V[i + comp * Q_1d] += c_G[i + q * Q_1d] * r_U[comp + 2 * NUM_COMP];  // PARTIAL contract z direction (Z derivative)
    }
    // Protect data.slice before the next component overwrites it
    __syncthreads();
  }
}
288