xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-basis-tensor.h (revision a0154adecfab8547cdc0febbbf40ac009dbe9d1d)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed/ceed.h>
9 
10 //------------------------------------------------------------------------------
11 // Tensor Basis Kernels
12 //------------------------------------------------------------------------------
13 
14 //------------------------------------------------------------------------------
15 // Interp
16 //------------------------------------------------------------------------------
extern "C" __global__ void Interp(const CeedInt num_elem, const CeedInt transpose,
                                  const CeedScalar *__restrict__ interp_1d,
                                  const CeedScalar *__restrict__ u,
                                  CeedScalar *__restrict__ v) {
  // Tensor-product interpolation: contracts the 1D operator interp_1d along
  // each of the BASIS_DIM tensor directions in turn, ping-ponging between two
  // shared-memory buffers.  transpose == 0 maps nodes -> quadrature points;
  // transpose != 0 applies the transposed operator (qpts -> nodes).
  // Launch shape: grid-stride loop over elements (one element per block per
  // pass), flat 1D thread block striding over the contraction work.
  const CeedInt i = threadIdx.x;

  // Shared memory layout: Q x P 1D operator, then two contraction buffers
  __shared__ CeedScalar s_mem[BASIS_Q_1D * BASIS_P_1D + 2 * BASIS_BUF_LEN];
  CeedScalar *s_interp_1d = s_mem;
  CeedScalar *s_buffer_1 = s_mem + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar *s_buffer_2 = s_buffer_1 + BASIS_BUF_LEN;
  for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
    s_interp_1d[k] = interp_1d[k];
  }

  // Contraction sizes and operator strides; the transpose is realized by
  // swapping the row/column strides of the 1D operator
  const CeedInt P = transpose ? BASIS_Q_1D : BASIS_P_1D;
  const CeedInt Q = transpose ? BASIS_P_1D : BASIS_Q_1D;
  const CeedInt stride_0 = transpose ? 1 : BASIS_P_1D;
  const CeedInt stride_1 = transpose ? BASIS_P_1D : 1;
  const CeedInt u_stride = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
  const CeedInt v_stride = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
  const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
  const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
  const CeedInt u_size = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
    for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
      const CeedScalar *cur_u = u + elem * u_stride + comp * u_comp_stride;
      CeedScalar *cur_v = v + elem * v_stride + comp * v_comp_stride;
      // Barrier before refilling s_buffer_1: on the previous comp/elem
      // iteration other threads may still be reading s_buffer_1 as the input
      // of the final contraction (the last input buffer is s_buffer_1 when
      // BASIS_DIM is odd), so the refill must not start until all threads are
      // done with it.  All enclosing loops are uniform across the block, so
      // this barrier is reached by every thread.
      __syncthreads();
      for (CeedInt k = i; k < u_size; k += blockDim.x) {
        s_buffer_1[k] = cur_u[k];
      }
      CeedInt pre = u_size;
      CeedInt post = 1;
      for (CeedInt d = 0; d < BASIS_DIM; d++) {
        // Make the prior iteration's buffer writes visible before reading
        __syncthreads();
        // Update buffers used
        pre /= P;
        const CeedScalar *in = d % 2 ? s_buffer_2 : s_buffer_1;
        // The final contraction writes directly to the global output
        CeedScalar *out = d == BASIS_DIM - 1 ? cur_v : (d % 2 ? s_buffer_1 : s_buffer_2);

        // Contract along middle index: out[a,j,c] = sum_b op[j,b] * in[a,b,c]
        const CeedInt writeLen = pre * post * Q;
        for (CeedInt k = i; k < writeLen; k += blockDim.x) {
          const CeedInt c = k % post;
          const CeedInt j = (k / post) % Q;
          const CeedInt a = k / (post * Q);

          CeedScalar vk = 0;
          for (CeedInt b = 0; b < P; b++)
            vk += s_interp_1d[j*stride_0 + b*stride_1] * in[(a*P + b)*post + c];

          out[k] = vk;
        }

        post *= Q;
      }
    }
  }
}
77 
78 //------------------------------------------------------------------------------
79 // Grad
80 //------------------------------------------------------------------------------
extern "C" __global__ void Grad(const CeedInt num_elem, const CeedInt transpose,
                                const CeedScalar *__restrict__ interp_1d,
                                const CeedScalar *__restrict__ grad_1d,
                                const CeedScalar *__restrict__ u,
                                CeedScalar *__restrict__ v) {
  // Tensor-product gradient: for each output direction dim_1, contracts
  // grad_1d along direction dim_1 and interp_1d along every other direction,
  // ping-ponging between two shared-memory buffers.  transpose == 0 maps
  // nodes -> (qpts per dim); transpose != 0 applies the transposed operator
  // and accumulates the per-direction contributions into v (see the += at the
  // final contraction below).
  // Launch shape: grid-stride loop over elements, flat 1D thread block
  // striding over the contraction work.
  const CeedInt i = threadIdx.x;

  // Shared memory layout: the two Q x P 1D operators, then two buffers
  __shared__ CeedScalar s_mem[2 * (BASIS_Q_1D * BASIS_P_1D + BASIS_BUF_LEN)];
  CeedScalar *s_interp_1d = s_mem;
  CeedScalar *s_grad_1d = s_interp_1d + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar *s_buffer_1 = s_grad_1d + BASIS_Q_1D * BASIS_P_1D;
  CeedScalar *s_buffer_2 = s_buffer_1 + BASIS_BUF_LEN;
  for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
    s_interp_1d[k] = interp_1d[k];
    s_grad_1d[k] = grad_1d[k];
  }

  // Contraction sizes and operator strides; the transpose is realized by
  // swapping the row/column strides of the 1D operators
  const CeedInt P = transpose ? BASIS_Q_1D : BASIS_P_1D;
  const CeedInt Q = transpose ? BASIS_P_1D : BASIS_Q_1D;
  const CeedInt stride_0 = transpose ? 1 : BASIS_P_1D;
  const CeedInt stride_1 = transpose ? BASIS_P_1D : 1;
  const CeedInt u_stride = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
  const CeedInt v_stride = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
  const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
  const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
  // The dim index lives on the input side when transposing, else on the output
  const CeedInt u_dim_stride = transpose ? num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP : 0;
  const CeedInt v_dim_stride = transpose ? 0 : num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP;

  // Apply basis element by element
  for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
    for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {

      // dim*dim contractions for grad
      for (CeedInt dim_1 = 0; dim_1 < BASIS_DIM; dim_1++) {
        CeedInt pre = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
        CeedInt post = 1;
        const CeedScalar *cur_u = u + elem * u_stride + dim_1 * u_dim_stride +
                                  comp * u_comp_stride;
        CeedScalar *cur_v = v + elem * v_stride + dim_1 * v_dim_stride + comp *
                            v_comp_stride;
        for (CeedInt dim_2 = 0; dim_2 < BASIS_DIM; dim_2++) {
          // All shared-buffer writes in this pass happen after this barrier,
          // so the previous pass's readers are safely past their reads
          __syncthreads();
          // Update buffers used
          pre /= P;
          // Differentiate along dim_1, interpolate along the other directions
          const CeedScalar *op = dim_1 == dim_2 ? s_grad_1d : s_interp_1d;
          // First contraction reads straight from global input; the last one
          // writes straight to global output; the rest ping-pong in shared
          const CeedScalar *in = dim_2 == 0
                                 ? cur_u
                                 : (dim_2 % 2 ? s_buffer_2 : s_buffer_1);
          CeedScalar *out = dim_2 == BASIS_DIM - 1
                            ? cur_v
                            : (dim_2 % 2 ? s_buffer_1 : s_buffer_2);

          // Contract along middle index: out[a,j,c] = sum_b op[j,b] * in[a,b,c]
          const CeedInt writeLen = pre * post * Q;
          for (CeedInt k = i; k < writeLen; k += blockDim.x) {
            const CeedInt c = k % post;
            const CeedInt j = (k / post) % Q;
            const CeedInt a = k / (post * Q);
            CeedScalar v_k = 0;
            for (CeedInt b = 0; b < P; b++)
              v_k += op[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];

            // In transpose mode all dim_1 passes target the same cur_v
            // (v_dim_stride == 0), so the final contraction accumulates.
            // NOTE(review): this assumes v arrives initialized (e.g. zeroed)
            // by the caller — confirm against the host-side launch code.
            if (transpose && dim_2 == BASIS_DIM - 1)
              out[k] += v_k;
            else
              out[k] = v_k;
          }

          post *= Q;
        }
      }
    }
  }
}
155 
156 //------------------------------------------------------------------------------
157 // 1D quadrature weights
158 //------------------------------------------------------------------------------
__device__ void Weight1d(const CeedInt num_elem, const CeedScalar *q_weight_1d,
                         CeedScalar *w) {
  // One element per block, one quadrature point per thread.x;
  // w is indexed [elem][q] with q fastest
  const size_t elem = blockIdx.x;
  const CeedInt q = threadIdx.x;

  if (q >= BASIS_Q_1D || elem >= (size_t)num_elem) return;
  w[elem * BASIS_Q_1D + q] = q_weight_1d[q];
}
168 
169 //------------------------------------------------------------------------------
170 // 2D quadrature weights
171 //------------------------------------------------------------------------------
__device__ void Weight2d(const CeedInt num_elem, const CeedScalar *q_weight_1d,
                         CeedScalar *w) {
  // One element per block, one quadrature point per (thread.x, thread.y);
  // w is indexed [elem][qy][qx] with qx fastest
  const CeedInt qx = threadIdx.x;
  const CeedInt qy = threadIdx.y;
  const size_t elem = blockIdx.x;

  if (qx >= BASIS_Q_1D || qy >= BASIS_Q_1D || elem >= (size_t)num_elem) return;
  const size_t ind = (elem * BASIS_Q_1D + qy) * BASIS_Q_1D + qx;
  w[ind] = q_weight_1d[qx] * q_weight_1d[qy];
}
185 
186 //------------------------------------------------------------------------------
187 // 3D quadrature weights
188 //------------------------------------------------------------------------------
__device__ void Weight3d(const CeedInt num_elem, const CeedScalar *q_weight_1d,
                         CeedScalar *w) {
  // One element per block; threads cover the (qx, qy) plane and loop over qz.
  // w is indexed [elem][qz][qy][qx] with qx fastest
  const CeedInt qx = threadIdx.x;
  const CeedInt qy = threadIdx.y;
  const size_t elem = blockIdx.x;

  if (qx >= BASIS_Q_1D || qy >= BASIS_Q_1D || elem >= (size_t)num_elem) return;

  // The qx*qy factor is invariant across the qz loop; same left-to-right
  // association as (w[qx] * w[qy]) * w[qz], so results are bit-identical
  const CeedScalar w_xy = q_weight_1d[qx] * q_weight_1d[qy];
  for (CeedInt qz = 0; qz < BASIS_Q_1D; qz++) {
    const size_t ind = ((elem * BASIS_Q_1D + qz) * BASIS_Q_1D + qy) * BASIS_Q_1D + qx;
    w[ind] = w_xy * q_weight_1d[qz];
  }
}
203 
204 //------------------------------------------------------------------------------
205 // Quadrature weights
206 //------------------------------------------------------------------------------
extern "C" __global__ void Weight(const CeedInt num_elem,
                                  const CeedScalar *__restrict__ q_weight_1d,
                                  CeedScalar *__restrict__ v) {
  // Dispatch on the JIT-time basis dimension; any other value is a no-op
  switch (BASIS_DIM) {
    case 1:
      Weight1d(num_elem, q_weight_1d, v);
      break;
    case 2:
      Weight2d(num_elem, q_weight_1d, v);
      break;
    case 3:
      Weight3d(num_elem, q_weight_1d, v);
      break;
  }
}
217 
218 //------------------------------------------------------------------------------
219