xref: /libCEED/include/ceed/jit-source/hip/hip-ref-basis-tensor.h (revision 0e654f564d4c4a0946c9ac3315d078addf8074c0)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed.h>
9 
10 //------------------------------------------------------------------------------
11 // Tensor Basis Kernels
12 //------------------------------------------------------------------------------
13 
14 //------------------------------------------------------------------------------
15 // Interp
16 //------------------------------------------------------------------------------
17 extern "C" __global__ void Interp(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
18                                   const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
19   const CeedInt i = threadIdx.x;
20 
21   __shared__ CeedScalar s_mem[BASIS_Q_1D * BASIS_P_1D + 2 * BASIS_BUF_LEN];
22   CeedScalar           *s_interp_1d = s_mem;
23   CeedScalar           *s_buffer_1  = s_mem + BASIS_Q_1D * BASIS_P_1D;
24   CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
25   for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
26     s_interp_1d[k] = interp_1d[k];
27   }
28 
29   const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
30   const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
31   const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
32   const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
33   const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
34   const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
35   const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
36   const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
37   const CeedInt u_size        = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
38 
39   // Apply basis element by element
40   for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
41     for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
42       const CeedScalar *cur_u = u + elem * u_stride + comp * u_comp_stride;
43       CeedScalar       *cur_v = v + elem * v_stride + comp * v_comp_stride;
44       for (CeedInt k = i; k < u_size; k += blockDim.x) {
45         s_buffer_1[k] = cur_u[k];
46       }
47       CeedInt pre  = u_size;
48       CeedInt post = 1;
49       for (CeedInt d = 0; d < BASIS_DIM; d++) {
50         __syncthreads();
51         // Update buffers used
52         pre /= P;
53         const CeedScalar *in  = d % 2 ? s_buffer_2 : s_buffer_1;
54         CeedScalar       *out = d == BASIS_DIM - 1 ? cur_v : (d % 2 ? s_buffer_1 : s_buffer_2);
55 
56         // Contract along middle index
57         const CeedInt writeLen = pre * post * Q;
58         for (CeedInt k = i; k < writeLen; k += blockDim.x) {
59           const CeedInt c = k % post;
60           const CeedInt j = (k / post) % Q;
61           const CeedInt a = k / (post * Q);
62 
63           CeedScalar vk = 0;
64           for (CeedInt b = 0; b < P; b++) vk += s_interp_1d[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];
65 
66           out[k] = vk;
67         }
68 
69         post *= Q;
70       }
71     }
72   }
73 }
74 
75 //------------------------------------------------------------------------------
76 // Grad
77 //------------------------------------------------------------------------------
78 extern "C" __global__ void Grad(const CeedInt num_elem, const CeedInt transpose, const CeedScalar *__restrict__ interp_1d,
79                                 const CeedScalar *__restrict__ grad_1d, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
80   const CeedInt i = threadIdx.x;
81 
82   __shared__ CeedScalar s_mem[2 * (BASIS_Q_1D * BASIS_P_1D + BASIS_BUF_LEN)];
83   CeedScalar           *s_interp_1d = s_mem;
84   CeedScalar           *s_grad_1d   = s_interp_1d + BASIS_Q_1D * BASIS_P_1D;
85   CeedScalar           *s_buffer_1  = s_grad_1d + BASIS_Q_1D * BASIS_P_1D;
86   CeedScalar           *s_buffer_2  = s_buffer_1 + BASIS_BUF_LEN;
87   for (CeedInt k = i; k < BASIS_Q_1D * BASIS_P_1D; k += blockDim.x) {
88     s_interp_1d[k] = interp_1d[k];
89     s_grad_1d[k]   = grad_1d[k];
90   }
91 
92   const CeedInt P             = transpose ? BASIS_Q_1D : BASIS_P_1D;
93   const CeedInt Q             = transpose ? BASIS_P_1D : BASIS_Q_1D;
94   const CeedInt stride_0      = transpose ? 1 : BASIS_P_1D;
95   const CeedInt stride_1      = transpose ? BASIS_P_1D : 1;
96   const CeedInt u_stride      = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
97   const CeedInt v_stride      = transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS;
98   const CeedInt u_comp_stride = num_elem * (transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES);
99   const CeedInt v_comp_stride = num_elem * (transpose ? BASIS_NUM_NODES : BASIS_NUM_QPTS);
100   const CeedInt u_dim_stride  = transpose ? num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP : 0;
101   const CeedInt v_dim_stride  = transpose ? 0 : num_elem * BASIS_NUM_QPTS * BASIS_NUM_COMP;
102 
103   // Apply basis element by element
104   for (CeedInt elem = blockIdx.x; elem < num_elem; elem += gridDim.x) {
105     for (CeedInt comp = 0; comp < BASIS_NUM_COMP; comp++) {
106       // dim*dim contractions for grad
107       for (CeedInt dim_1 = 0; dim_1 < BASIS_DIM; dim_1++) {
108         CeedInt           pre   = transpose ? BASIS_NUM_QPTS : BASIS_NUM_NODES;
109         CeedInt           post  = 1;
110         const CeedScalar *cur_u = u + elem * u_stride + dim_1 * u_dim_stride + comp * u_comp_stride;
111         CeedScalar       *cur_v = v + elem * v_stride + dim_1 * v_dim_stride + comp * v_comp_stride;
112         for (CeedInt dim_2 = 0; dim_2 < BASIS_DIM; dim_2++) {
113           __syncthreads();
114           // Update buffers used
115           pre /= P;
116           const CeedScalar *op  = dim_1 == dim_2 ? s_grad_1d : s_interp_1d;
117           const CeedScalar *in  = dim_2 == 0 ? cur_u : (dim_2 % 2 ? s_buffer_2 : s_buffer_1);
118           CeedScalar       *out = dim_2 == BASIS_DIM - 1 ? cur_v : (dim_2 % 2 ? s_buffer_1 : s_buffer_2);
119 
120           // Contract along middle index
121           const CeedInt writeLen = pre * post * Q;
122           for (CeedInt k = i; k < writeLen; k += blockDim.x) {
123             const CeedInt c   = k % post;
124             const CeedInt j   = (k / post) % Q;
125             const CeedInt a   = k / (post * Q);
126             CeedScalar    v_k = 0;
127             for (CeedInt b = 0; b < P; b++) v_k += op[j * stride_0 + b * stride_1] * in[(a * P + b) * post + c];
128 
129             if (transpose && dim_2 == BASIS_DIM - 1) out[k] += v_k;
130             else out[k] = v_k;
131           }
132 
133           post *= Q;
134         }
135       }
136     }
137   }
138 }
139 
140 //------------------------------------------------------------------------------
141 // 1D quadrature weights
142 //------------------------------------------------------------------------------
__device__ void Weight1d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Cache the 1D quadrature weights locally; every element uses the same values.
  CeedScalar weight[BASIS_Q_1D];
  for (CeedInt q = 0; q < BASIS_Q_1D; q++) weight[q] = q_weight_1d[q];

  // Grid-stride over elements: each thread fills whole elements on its own.
  const CeedInt stride = blockDim.x * gridDim.x;
  for (CeedInt elem = blockIdx.x * blockDim.x + threadIdx.x; elem < num_elem; elem += stride) {
    CeedScalar *elem_w = w + elem * BASIS_Q_1D;  // sequential per element
    for (CeedInt q = 0; q < BASIS_Q_1D; q++) elem_w[q] = weight[q];
  }
}
153 
154 //------------------------------------------------------------------------------
155 // 2D quadrature weights
156 //------------------------------------------------------------------------------
__device__ void Weight2d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Cache the 1D quadrature weights locally; every element uses the same values.
  CeedScalar weight[BASIS_Q_1D];
  for (CeedInt q = 0; q < BASIS_Q_1D; q++) weight[q] = q_weight_1d[q];

  // Grid-stride over elements; each element stores the tensor product
  // weight[i] * weight[j] at index i + j * Q_1D.
  const CeedInt stride = blockDim.x * gridDim.x;
  for (CeedInt elem = blockIdx.x * blockDim.x + threadIdx.x; elem < num_elem; elem += stride) {
    CeedScalar *elem_w = w + elem * BASIS_Q_1D * BASIS_Q_1D;  // sequential per element
    for (CeedInt j = 0; j < BASIS_Q_1D; j++) {
      for (CeedInt i = 0; i < BASIS_Q_1D; i++) {
        elem_w[i + j * BASIS_Q_1D] = weight[i] * weight[j];
      }
    }
  }
}
168 
169 //------------------------------------------------------------------------------
170 // 3D quadrature weights
171 //------------------------------------------------------------------------------
__device__ void Weight3d(const CeedInt num_elem, const CeedScalar *q_weight_1d, CeedScalar *w) {
  // Cache the 1D quadrature weights locally; every element uses the same values.
  CeedScalar weight[BASIS_Q_1D];
  for (CeedInt q = 0; q < BASIS_Q_1D; q++) weight[q] = q_weight_1d[q];

  // Grid-stride over elements; each element stores the tensor product
  // weight[i] * weight[j] * weight[k] at index i + (j + k * Q_1D) * Q_1D.
  const CeedInt stride = blockDim.x * gridDim.x;
  for (CeedInt elem = blockIdx.x * blockDim.x + threadIdx.x; elem < num_elem; elem += stride) {
    CeedScalar *elem_w = w + elem * BASIS_Q_1D * BASIS_Q_1D * BASIS_Q_1D;  // sequential per element
    for (CeedInt k = 0; k < BASIS_Q_1D; k++) {
      for (CeedInt j = 0; j < BASIS_Q_1D; j++) {
        const CeedScalar w_jk = weight[j] * weight[k];
        for (CeedInt i = 0; i < BASIS_Q_1D; i++) {
          elem_w[i + j * BASIS_Q_1D + k * BASIS_Q_1D * BASIS_Q_1D] = weight[i] * w_jk;
        }
      }
    }
  }
}
184 
185 //------------------------------------------------------------------------------
186 // Quadrature weights
187 //------------------------------------------------------------------------------
188 extern "C" __global__ void Weight(const CeedInt num_elem, const CeedScalar *__restrict__ q_weight_1d, CeedScalar *__restrict__ v) {
189   if (BASIS_DIM == 1) Weight1d(num_elem, q_weight_1d, v);
190   else if (BASIS_DIM == 2) Weight2d(num_elem, q_weight_1d, v);
191   else if (BASIS_DIM == 3) Weight3d(num_elem, q_weight_1d, v);
192 }
193 
194 //------------------------------------------------------------------------------
195