xref: /libCEED/include/ceed/jit-source/hip/hip-ref-basis-tensor.h (revision 07d5b98a8feba68a643190b8ea9bcdac5c3e6570)
1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for HIP tensor product basis
10 
11 #include <ceed.h>
12 
13 //------------------------------------------------------------------------------
14 // Tensor Basis Kernels
15 //------------------------------------------------------------------------------
16 
17 //------------------------------------------------------------------------------
18 // Interp
19 //------------------------------------------------------------------------------
20 extern "C" __global__ void Interp(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
21                                   const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
22   const CeedInt i = threadIdx.x;
23 
24   __shared__ CeedScalar s_mem[BASIS_Q_1D * BASIS_P_1D + 2 * BASIS_BUF_LEN];
25   CeedScalar           *s_interp_1d = s_mem;
26   CeedScalar           *s_buffer_1  = s_mem + BASIS_Q_1D * BASIS_P_1D;
27   CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
28   for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
29     s_interp_1d[k] = interp_1d[k];
30   }
31 
32   const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
33   const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
34   const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
35   const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
36   const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
37   const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
38   const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
39   const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
40   const CeedInt u_size        = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
41 
42   // Apply basis element by element
43   for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
44     for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
45       const CeedScalar *cur_u = u + elem * u_stride + comp * u_comp_stride;
46       CeedScalar       *cur_v = v + elem * v_stride + comp * v_comp_stride;
47       CeedInt           pre   = u_size;
48       CeedInt           post  = 1;
49 
50       for (CeedInt k = i; k < u_size; k += blockDim.x) {
51         s_buffer_1[k] = cur_u[k];
52       }
53       for (CeedInt d = 0; d < BASIS_DIM; d++) {
54         __syncthreads();
55         // Update buffers used
56         pre /= P;
57         const CeedScalar *in       = d % 2 ? s_buffer_2 : s_buffer_1;
58         CeedScalar       *out      = d == BASIS_DIM - 1 ? cur_v : (d % 2 ? s_buffer_1 : s_buffer_2);
59         const CeedInt     writeLen = pre * post * Q;
60 
61         // Contract along middle index
62         for (CeedInt k = i; k < writeLen; k += blockDim.x) {
63           const CeedInt c  = k % post;
64           const CeedInt j  = (k / post) % Q;
65           const CeedInt a  = k / (post * Q);
66           CeedScalar    vk = 0;
67 
68           for (CeedInt b = 0; b < P; b++) vk += s_interp_1d[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];
69           out[k] = vk;
70         }
71         post *= Q;
72       }
73     }
74   }
75 }
76 
77 //------------------------------------------------------------------------------
78 // Grad
79 //------------------------------------------------------------------------------
80 extern "C" __global__ void Grad(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
81                                 const CeedScalar *__restrict__ grad_1d, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
82   const CeedInt i = threadIdx.x;
83 
84   __shared__ CeedScalar s_mem[2 * (BASIS_Q_1D * BASIS_P_1D + BASIS_BUF_LEN)];
85   CeedScalar           *s_interp_1d = s_mem;
86   CeedScalar           *s_grad_1d   = s_interp_1d + BASIS_Q_1D * BASIS_P_1D;
87   CeedScalar           *s_buffer_1  = s_grad_1d + BASIS_Q_1D * BASIS_P_1D;
88   CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
89   for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
90     s_interp_1d[k] = interp_1d[k];
91     s_grad_1d[k]   = grad_1d[k];
92   }
93 
94   const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
95   const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
96   const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
97   const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
98   const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
99   const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
100   const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
101   const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
102   const CeedInt u_dim_stride  = transpose ? num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP : 0;
103   const CeedInt v_dim_stride  = transpose ? 0 : num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP;
104 
105   // Apply basis element by element
106   for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
107     for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
108       // dim*dim contractions for grad
109       for (CeedInt dim_1 = 0; dim_1 < BASIS_DIM; dim_1++) {
110         CeedInt           pre   = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
111         CeedInt           post  = 1;
112         const CeedScalar *cur_u = u + elem * u_stride + dim_1 * u_dim_stride + comp * u_comp_stride;
113         CeedScalar       *cur_v = v + elem * v_stride + dim_1 * v_dim_stride + comp * v_comp_stride;
114 
115         for (CeedInt dim_2 = 0; dim_2 < BASIS_DIM; dim_2++) {
116           __syncthreads();
117           // Update buffers used
118           pre /= P;
119           const CeedScalar *op       = dim_1 == dim_2 ? s_grad_1d : s_interp_1d;
120           const CeedScalar *in       = dim_2 == 0 ? cur_u : (dim_2 % 2 ? s_buffer_2 : s_buffer_1);
121           CeedScalar       *out      = dim_2 == BASIS_DIM - 1 ? cur_v : (dim_2 % 2 ? s_buffer_1 : s_buffer_2);
122           const CeedInt     writeLen = pre * post * Q;
123 
124           // Contract along middle index
125           for (CeedInt k = i; k < writeLen; k += blockDim.x) {
126             const CeedInt c   = k % post;
127             const CeedInt j   = (k / post) % Q;
128             const CeedInt a   = k / (post * Q);
129             CeedScalar    v_k = 0;
130 
131             for (CeedInt b = 0; b < P; b++) v_k += op[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];
132             if (transpose && dim_2 == BASIS_DIM - 1) out[k] += v_k;
133             else out[k] = v_k;
134           }
135           post *= Q;
136         }
137       }
138     }
139   }
140 }
141 
142 //------------------------------------------------------------------------------
143 // 1D quadrature weights
144 //------------------------------------------------------------------------------
__device__ void Weight1d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Copy the 1D quadrature weights into each element's slot of w.
  // One element per block; thread x indexes the quadrature point.
  const size_t  elem = blockIdx.x;
  const CeedInt q    = threadIdx.x;

  if (q < BASIS_Q_1D && elem < num_elem) w[elem * BASIS_Q_1D + q] = q_weight_1d[q];
}
154 
155 //------------------------------------------------------------------------------
156 // 2D quadrature weights
157 //------------------------------------------------------------------------------
__device__ void Weight2d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Fill w with the 2D tensor-product quadrature weights, one element per
  // block; threads (x, y) index the quadrature point in each direction.
  const size_t  elem = blockIdx.x;
  const CeedInt q_x  = threadIdx.x;
  const CeedInt q_y  = threadIdx.y;

  if (q_x >= BASIS_Q_1D || q_y >= BASIS_Q_1D || elem >= num_elem) return;
  w[(elem * BASIS_Q_1D + q_y) * BASIS_Q_1D + q_x] = q_weight_1d[q_x] * q_weight_1d[q_y];
}
172 
173 //------------------------------------------------------------------------------
174 // 3D quadrature weights
175 //------------------------------------------------------------------------------
__device__ void Weight3d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Fill w with the 3D tensor-product quadrature weights, one element per
  // block; threads (x, y) cover two directions, each thread loops over z.
  const size_t  elem = blockIdx.x;
  const CeedInt q_x  = threadIdx.x;
  const CeedInt q_y  = threadIdx.y;

  if (q_x >= BASIS_Q_1D || q_y >= BASIS_Q_1D || elem >= num_elem) return;
  // Hoist the xy product; (w_x * w_y) * w_z matches the original
  // left-to-right evaluation order exactly
  const CeedScalar w_xy = q_weight_1d[q_x] * q_weight_1d[q_y];

  for (CeedInt q_z = 0; q_z < BASIS_Q_1D; q_z++) {
    const size_t ind = ((elem * BASIS_Q_1D + q_z) * BASIS_Q_1D + q_y) * BASIS_Q_1D + q_x;

    w[ind] = w_xy * q_weight_1d[q_z];
  }
}
192 
193 //------------------------------------------------------------------------------
194 // Quadrature weights
195 //------------------------------------------------------------------------------
196 extern "C" __global__ void Weight(const CeedInt num_elem, const CeedScalar *__restrict__ q_weight_1d, CeedScalar *__restrict__ v) {
197   if (BASIS_DIM == 1) Weight1d(num_elem, q_weight_1d, v);
198   else if (BASIS_DIM == 2) Weight2d(num_elem, q_weight_1d, v);
199   else if (BASIS_DIM == 3) Weight3d(num_elem, q_weight_1d, v);
200 }
201