xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-basis-tensor.h (revision 21f16bf6c2940c092555e126518ed0c98a2b88a9)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for CUDA tensor product basis
10 #ifndef CEED_CUDA_REF_BASIS_TENSOR_H
11 #define CEED_CUDA_REF_BASIS_TENSOR_H
12 
13 #include <ceed.h>
14 
15 //------------------------------------------------------------------------------
16 // Tensor Basis Kernels
17 //------------------------------------------------------------------------------
18 
//------------------------------------------------------------------------------
// Interp
//
// Tensor-product interpolation (or its transpose): applies interp_1d along each
// of the BASIS_DIM directions in turn, ping-ponging intermediate results
// between two shared-memory buffers.
//
// Launch: 1D thread blocks; each block processes elements blockIdx.x,
// blockIdx.x + gridDim.x, ... and all threads of a block cooperate on each
// contraction. Shared-memory requirement is the static s_mem below.
// BASIS_* constants are supplied as macros at JiT compile time.
//------------------------------------------------------------------------------
extern "C" __global__ void Interp(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
                                  const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt i = threadIdx.x;

  // Shared memory layout: [interp_1d matrix | buffer 1 | buffer 2]
  __shared__ CeedScalar s_mem[BASIS_Q_1D * BASIS_P_1D + 2 * BASIS_BUF_LEN];
  CeedScalar           *s_interp_1d = s_mem;
  CeedScalar           *s_buffer_1  = s_mem + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
  // Cache the interpolation matrix in shared memory (first read is after the
  // barrier at the top of the d-loop below, so no extra sync is needed here)
  for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
    s_interp_1d[k] = interp_1d[k];
  }

  // In transpose mode the roles of nodes and quadrature points swap, and the
  // 1D matrix is read transposed via the stride pair (stride_0, stride_1)
  const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
  const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
  const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
  const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
  const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
  const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
  const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
  const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
  const CeedInt u_size        = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
    for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
      const CeedScalar *cur_u = u + elem * u_stride + comp * u_comp_stride;
      CeedScalar       *cur_v = v + elem * v_stride + comp * v_comp_stride;

      // Barrier before refilling s_buffer_1: a thread may still be reading it
      // as the input of the previous pass's final contraction (write-after-read
      // race otherwise when BASIS_DIM is odd)
      __syncthreads();
      for (CeedInt k = i; k < u_size; k += blockDim.x) {
        s_buffer_1[k] = cur_u[k];
      }
      CeedInt pre  = u_size;
      CeedInt post = 1;

      // One tensor contraction per dimension
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        __syncthreads();
        // Update buffers used
        pre /= P;
        const CeedScalar *in  = d % 2 ? s_buffer_2 : s_buffer_1;
        CeedScalar       *out = d == BASIS_DIM - 1 ? cur_v : (d % 2 ? s_buffer_1 : s_buffer_2);

        // Contract along middle index: out[a, j, c] = sum_b B[j, b] * in[a, b, c]
        const CeedInt writeLen = pre * post * Q;
        for (CeedInt k = i; k < writeLen; k += blockDim.x) {
          const CeedInt c = k % post;
          const CeedInt j = (k / post) % Q;
          const CeedInt a = k / (post * Q);

          CeedScalar vk = 0;
          for (CeedInt b = 0; b < P; b++) vk += s_interp_1d[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];
          out[k] = vk;
        }
        post *= Q;
      }
    }
  }
}
79 
//------------------------------------------------------------------------------
// Grad
//
// Tensor-product gradient (or its transpose): for each direction dim_1, applies
// one contraction per direction dim_2, using the grad_1d matrix when
// dim_1 == dim_2 and interp_1d otherwise, ping-ponging between two shared
// buffers. In transpose mode the contributions of all dim_1 passes are summed
// into v (out[k] += v_k on the last contraction).
//
// Launch: 1D thread blocks; each block handles elements blockIdx.x,
// blockIdx.x + gridDim.x, ... with all threads cooperating on each
// contraction. BASIS_* constants are supplied as macros at JiT compile time.
//------------------------------------------------------------------------------
extern "C" __global__ void Grad(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
                                const CeedScalar *__restrict__ grad_1d, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt i = threadIdx.x;

  // Shared memory layout: [interp matrix | grad matrix | buffer 1 | buffer 2]
  __shared__ CeedScalar s_mem[2 * (BASIS_Q_1D * BASIS_P_1D + BASIS_BUF_LEN)];
  CeedScalar           *s_interp_1d = s_mem;
  CeedScalar           *s_grad_1d   = s_interp_1d + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar           *s_buffer_1  = s_grad_1d + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
  // Cache both 1D matrices in shared memory (first read is after the barrier
  // at the top of the dim_2 loop below)
  for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
    s_interp_1d[k] = interp_1d[k];
    s_grad_1d[k]   = grad_1d[k];
  }

  // In transpose mode the roles of nodes and quadrature points swap, the 1D
  // matrices are read transposed via (stride_0, stride_1), and the gradient
  // direction indexes u instead of v
  const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
  const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
  const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
  const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
  const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
  const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
  const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
  const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
  const CeedInt u_dim_stride  = transpose ? num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP : 0;
  const CeedInt v_dim_stride  = transpose ? 0 : num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP;

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
    for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
      // dim*dim contractions for grad
      for (CeedInt dim_1 = 0; dim_1 < BASIS_DIM; dim_1++) {
        CeedInt           pre   = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
        CeedInt           post  = 1;
        const CeedScalar *cur_u = u + elem * u_stride + dim_1 * u_dim_stride + comp * u_comp_stride;
        CeedScalar       *cur_v = v + elem * v_stride + dim_1 * v_dim_stride + comp * v_comp_stride;

        for (CeedInt dim_2 = 0; dim_2 < BASIS_DIM; dim_2++) {
          __syncthreads();
          // Update buffers used
          pre /= P;
          // Differentiate along dim_1, interpolate along the other directions
          const CeedScalar *op  = dim_1 == dim_2 ? s_grad_1d : s_interp_1d;
          // First contraction reads u directly; last writes v directly;
          // intermediate ones ping-pong between the shared buffers
          const CeedScalar *in  = dim_2 == 0 ? cur_u : (dim_2 % 2 ? s_buffer_2 : s_buffer_1);
          CeedScalar       *out = dim_2 == BASIS_DIM - 1 ? cur_v : (dim_2 % 2 ? s_buffer_1 : s_buffer_2);

          // Contract along middle index: out[a, j, c] = sum_b op[j, b] * in[a, b, c]
          const CeedInt writeLen = pre * post * Q;
          for (CeedInt k = i; k < writeLen; k += blockDim.x) {
            const CeedInt c   = k % post;
            const CeedInt j   = (k / post) % Q;
            const CeedInt a   = k / (post * Q);
            CeedScalar    v_k = 0;

            for (CeedInt b = 0; b < P; b++) v_k += op[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];

            // Transpose mode accumulates the dim_1 passes into v
            if (transpose && dim_2 == BASIS_DIM - 1) out[k] += v_k;
            else out[k] = v_k;
          }
          post *= Q;
        }
      }
    }
  }
}
145 
//------------------------------------------------------------------------------
// 1D quadrature weights
//------------------------------------------------------------------------------
// Each block handles one element (blockIdx.x); thread q copies the q-th 1D
// quadrature weight into that element's slot of w.
__device__ void Weight1d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  const CeedInt q    = threadIdx.x;
  const size_t  elem = blockIdx.x;

  // Ignore threads beyond the quadrature size and blocks beyond the elements
  if (q >= BASIS_Q_1D || elem >= num_elem) return;

  w[elem * BASIS_Q_1D + q] = q_weight_1d[q];
}
158 
//------------------------------------------------------------------------------
// 2D quadrature weights
//------------------------------------------------------------------------------
// Each block handles one element; thread (q_x, q_y) writes the tensor-product
// weight for that quadrature point.
__device__ void Weight2d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  const CeedInt q_x  = threadIdx.x;
  const CeedInt q_y  = threadIdx.y;
  const size_t  elem = blockIdx.x;

  // Ignore threads beyond the quadrature size and blocks beyond the elements
  if (q_x >= BASIS_Q_1D || q_y >= BASIS_Q_1D || elem >= num_elem) return;

  const size_t ind = (elem * BASIS_Q_1D + q_y) * BASIS_Q_1D + q_x;

  w[ind] = q_weight_1d[q_x] * q_weight_1d[q_y];
}
176 
//------------------------------------------------------------------------------
// 3D quadrature weights
//------------------------------------------------------------------------------
// Each block handles one element; thread (q_x, q_y) loops over the z direction
// and writes the tensor-product weight for each quadrature point in its column.
__device__ void Weight3d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  const CeedInt q_x  = threadIdx.x;
  const CeedInt q_y  = threadIdx.y;
  const size_t  elem = blockIdx.x;

  // Ignore threads beyond the quadrature size and blocks beyond the elements
  if (q_x >= BASIS_Q_1D || q_y >= BASIS_Q_1D || elem >= num_elem) return;

  // Hoist the x*y factor; (w_x * w_y) * w_z matches the original left-to-right
  // association, so results are bit-identical
  const CeedScalar w_xy = q_weight_1d[q_x] * q_weight_1d[q_y];

  for (CeedInt q_z = 0; q_z < BASIS_Q_1D; q_z++) {
    const size_t ind = ((elem * BASIS_Q_1D + q_z) * BASIS_Q_1D + q_y) * BASIS_Q_1D + q_x;

    w[ind] = w_xy * q_weight_1d[q_z];
  }
}
196 
//------------------------------------------------------------------------------
// Quadrature weights
//------------------------------------------------------------------------------
// Dispatch to the dimension-specific helper. BASIS_DIM is a compile-time
// constant here, so the untaken cases should be eliminated by the compiler.
extern "C" __global__ void Weight(const CeedInt num_elem, const CeedScalar *__restrict__ q_weight_1d, CeedScalar *__restrict__ v) {
  switch (BASIS_DIM) {
    case 1:
      Weight1d(num_elem, q_weight_1d, v);
      break;
    case 2:
      Weight2d(num_elem, q_weight_1d, v);
      break;
    case 3:
      Weight3d(num_elem, q_weight_1d, v);
      break;
  }
}
205 
206 //------------------------------------------------------------------------------
207 
208 #endif  // CEED_CUDA_REF_BASIS_TENSOR_H
209