// xref: /libCEED/include/ceed/jit-source/hip/hip-shared-basis-tensor.h (revision 0be03a92683d319639505fd4b3dce80b3bae318f)
// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for HIP shared memory tensor product basis
#ifndef _ceed_hip_shared_basis_tensor_h
#define _ceed_hip_shared_basis_tensor_h

#include <ceed.h>
#include "hip-shared-basis-read-write-templates.h"
#include "hip-shared-basis-tensor-templates.h"

//------------------------------------------------------------------------------
// Interp kernel by dim
//------------------------------------------------------------------------------
20 extern "C"  __launch_bounds__(BASIS_INTERP_BLOCK_SIZE)
21             __global__ void Interp(const CeedInt num_elem,
22                                    const CeedScalar *d_interp_1d,
23                                    const CeedScalar *__restrict__ d_U,
24                                    CeedScalar *__restrict__ d_V) {
25   extern __shared__ CeedScalar slice[];
26   // load interp_1d into shared memory
27   __shared__ CeedScalar s_B[BASIS_P_1D*BASIS_Q_1D];
28   loadMatrix<BASIS_P_1D*BASIS_Q_1D>(d_interp_1d, s_B);
29   __syncthreads();
30 
31   SharedData_Hip data;
32   data.t_id_x = threadIdx.x;
33   data.t_id_y = threadIdx.y;
34   data.t_id_z = threadIdx.z;
35   data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;
36   data.slice = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);
37 
38   CeedScalar r_U[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];
39   CeedScalar r_V[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];
40 
41   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {
42     if (BASIS_DIM == 1) {
43       ReadElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*num_elem, BASIS_P_1D, d_U, r_U);
44       Interp1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
45       WriteElementStrided1d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*num_elem, BASIS_Q_1D, r_V, d_V);
46     } else if (BASIS_DIM == 2) {
47       ReadElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D, d_U, r_U);
48       InterpTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
49       WriteElementStrided2d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D, r_V, d_V);
50     } else if (BASIS_DIM == 3) {
51       ReadElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D, d_U, r_U);
52       InterpTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
53       WriteElementStrided3d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D, r_V, d_V);
54     }
55   }
56 }
58 extern "C"  __launch_bounds__(BASIS_INTERP_BLOCK_SIZE)
59             __global__ void InterpTranspose(const CeedInt num_elem,
60                                             const CeedScalar *d_interp_1d,
61                                             const CeedScalar *__restrict__ d_U,
62                                             CeedScalar *__restrict__ d_V) {
63   extern __shared__ CeedScalar slice[];
64   // load interp_1d into shared memory
65   __shared__ CeedScalar s_B[BASIS_P_1D*BASIS_Q_1D];
66   loadMatrix<BASIS_P_1D*BASIS_Q_1D>(d_interp_1d, s_B);
67   __syncthreads();
68 
69   SharedData_Hip data;
70   data.t_id_x = threadIdx.x;
71   data.t_id_y = threadIdx.y;
72   data.t_id_z = threadIdx.z;
73   data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;
74   data.slice = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);
75 
76   CeedScalar r_U[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];
77   CeedScalar r_V[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];
78 
79   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {
80     if (BASIS_DIM == 1) {
81       ReadElementStrided1d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*num_elem, BASIS_Q_1D, d_U, r_U);
82       InterpTranspose1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
83       WriteElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*num_elem, BASIS_P_1D, r_V, d_V);
84     } else if (BASIS_DIM == 2) {
85       ReadElementStrided2d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D, d_U, r_U);
86       InterpTransposeTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
87       WriteElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D, r_V, d_V);
88     } else if (BASIS_DIM == 3) {
89       ReadElementStrided3d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D, d_U, r_U);
90       InterpTransposeTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, r_V);
91       WriteElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D, r_V, d_V);
92     }
93   }
94 }

//------------------------------------------------------------------------------
// Grad kernel by dim
//------------------------------------------------------------------------------
99 extern "C" __launch_bounds__(BASIS_GRAD_BLOCK_SIZE)
100            __global__ void Grad(const CeedInt num_elem,
101                                 const CeedScalar *d_interp_1d,
102                                 const CeedScalar *d_grad_1d,
103                                 const CeedScalar *__restrict__ d_U,
104                                 CeedScalar *__restrict__ d_V) {
105   extern __shared__ CeedScalar slice[];
106   // load interp_1d and grad_1d into shared memory
107   __shared__ CeedScalar s_B[BASIS_P_1D*BASIS_Q_1D];
108   loadMatrix<BASIS_P_1D*BASIS_Q_1D>(d_interp_1d, s_B);
109   __shared__ CeedScalar s_G[BASIS_Q_1D * (BASIS_HAS_COLLOCATED_GRAD ? BASIS_Q_1D : BASIS_P_1D)];
110   loadMatrix<BASIS_Q_1D * (BASIS_HAS_COLLOCATED_GRAD ? BASIS_Q_1D : BASIS_P_1D)>(d_grad_1d, s_G);
111   __syncthreads();
112 
113   SharedData_Hip data;
114   data.t_id_x = threadIdx.x;
115   data.t_id_y = threadIdx.y;
116   data.t_id_z = threadIdx.z;
117   data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;
118   data.slice = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);
119 
120   CeedScalar r_U[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];
121   CeedScalar r_V[BASIS_NUM_COMP * BASIS_DIM * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];
122 
123   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {
124     if (BASIS_DIM == 1) {
125       ReadElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*num_elem, BASIS_P_1D, d_U, r_U);
126       Grad1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
127       WriteElementStrided1d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*num_elem, BASIS_Q_1D, r_V, d_V);
128     } else if (BASIS_DIM == 2) {
129       ReadElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D, d_U, r_U);
130       GradTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
131       WriteElementStrided2d<BASIS_NUM_COMP*BASIS_DIM, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D, r_V, d_V);
132     } else if (BASIS_DIM == 3) {
133       ReadElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D, d_U, r_U);
134       if (BASIS_HAS_COLLOCATED_GRAD) GradTensorCollocated3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
135       else                           GradTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
136       WriteElementStrided3d<BASIS_NUM_COMP*BASIS_DIM, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D, r_V, d_V);
137     }
138   }
139 }
141 extern "C" __launch_bounds__(BASIS_GRAD_BLOCK_SIZE)
142            __global__ void GradTranspose(const CeedInt num_elem,
143                                          const CeedScalar *d_interp_1d,
144                                          const CeedScalar *d_grad_1d,
145                                          const CeedScalar *__restrict__ d_U,
146                                          CeedScalar *__restrict__ d_V) {
147   extern __shared__ CeedScalar slice[];
148   // load interp_1d and grad_1d into shared memory
149   __shared__ CeedScalar s_B[BASIS_P_1D*BASIS_Q_1D];
150   loadMatrix<BASIS_P_1D*BASIS_Q_1D>(d_interp_1d, s_B);
151   __shared__ CeedScalar s_G[BASIS_Q_1D * (BASIS_HAS_COLLOCATED_GRAD ? BASIS_Q_1D : BASIS_P_1D)];
152   loadMatrix<BASIS_Q_1D * (BASIS_HAS_COLLOCATED_GRAD ? BASIS_Q_1D : BASIS_P_1D)>(d_grad_1d, s_G);
153   __syncthreads();
154 
155   SharedData_Hip data;
156   data.t_id_x = threadIdx.x;
157   data.t_id_y = threadIdx.y;
158   data.t_id_z = threadIdx.z;
159   data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;
160   data.slice = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);
161 
162   CeedScalar r_U[BASIS_NUM_COMP * BASIS_DIM * (BASIS_DIM > 2 ? BASIS_Q_1D : 1)];
163   CeedScalar r_V[BASIS_NUM_COMP * (BASIS_DIM > 2 ? BASIS_P_1D : 1)];
164 
165   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {
166     if (BASIS_DIM == 1) {
167       ReadElementStrided1d<BASIS_NUM_COMP, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*num_elem, BASIS_Q_1D, d_U, r_U);
168       GradTranspose1d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
169       WriteElementStrided1d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*num_elem, BASIS_P_1D, r_V, d_V);
170     } else if (BASIS_DIM == 2) {
171       ReadElementStrided2d<BASIS_NUM_COMP*BASIS_DIM, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D, d_U, r_U);
172       GradTransposeTensor2d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
173       WriteElementStrided2d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D, r_V, d_V);
174     } else if (BASIS_DIM == 3) {
175       ReadElementStrided3d<BASIS_NUM_COMP*BASIS_DIM, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D, d_U, r_U);
176       if (BASIS_HAS_COLLOCATED_GRAD) GradTransposeTensorCollocated3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
177       else                           GradTransposeTensor3d<BASIS_NUM_COMP, BASIS_P_1D, BASIS_Q_1D>(data, r_U, s_B, s_G, r_V);
178       WriteElementStrided3d<BASIS_NUM_COMP, BASIS_P_1D>(data, elem, 1, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D*num_elem, BASIS_P_1D*BASIS_P_1D*BASIS_P_1D, r_V, d_V);
179     }
180   }
181 }

//------------------------------------------------------------------------------
// Weight kernels by dim
//------------------------------------------------------------------------------
186 extern "C" __launch_bounds__(BASIS_WEIGHT_BLOCK_SIZE)
187            __global__ void Weight(const CeedInt num_elem,
188                                   const CeedScalar *__restrict__ q_weight_1d,
189                                   CeedScalar *__restrict__ d_W) {
190   extern __shared__ CeedScalar slice[];
191 
192   SharedData_Hip data;
193   data.t_id_x = threadIdx.x;
194   data.t_id_y = threadIdx.y;
195   data.t_id_z = threadIdx.z;
196   data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;
197   data.slice = slice + data.t_id_z * T_1D * (BASIS_DIM > 1 ? T_1D : 1);
198 
199   CeedScalar r_W[BASIS_DIM > 2 ? BASIS_Q_1D : 1];
200 
201   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {
202     if (BASIS_DIM == 1) {
203       Weight1d<BASIS_Q_1D>(data, q_weight_1d, r_W);
204       WriteElementStrided1d<1, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*num_elem, BASIS_Q_1D, r_W, d_W);
205     } else if (BASIS_DIM == 2) {
206       WeightTensor2d<BASIS_Q_1D>(data, q_weight_1d, r_W);
207       WriteElementStrided2d<1, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D, r_W, d_W);
208     } else if (BASIS_DIM == 3) {
209       WeightTensor3d<BASIS_Q_1D>(data, q_weight_1d, r_W);
210       WriteElementStrided3d<1, BASIS_Q_1D>(data, elem, 1, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D*num_elem, BASIS_Q_1D*BASIS_Q_1D*BASIS_Q_1D, r_W, d_W);
211     }
212   }
213 }

//------------------------------------------------------------------------------

#endif