1 // Copyright (c) 2017-2026, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED: http://github.com/ceed
7
8 /// @file
9 /// Internal header for HIP curl-oriented element restriction kernels
10 #include <ceed/types.h>
11
12 //------------------------------------------------------------------------------
13 // L-vector -> E-vector, curl-oriented
14 //------------------------------------------------------------------------------
// Scatter L-vector u into E-vector v with the curl-orientation transformation applied.
// curl_orients stores three CeedInt8 coefficients per node ([3*node + 0..2]), used here
// as the sub-, main-, and super-diagonal entries of a per-element banded (tridiagonal)
// transformation; boundary nodes of each element mask the out-of-range neighbor term.
// Grid-stride loop: one thread per (element, local node) pair, RSTR_NUM_COMP components each.
extern "C" __global__ void CurlOrientedNoTranspose(const CeedInt *__restrict__ indices, const CeedInt8 *__restrict__ curl_orients,
                                                   const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    // Neighbor L-vector indices; the dummy 0 index is never read because the matching
    // term is masked by the same loc_node predicate in the accumulation below
    const CeedInt  ind_dl         = loc_node > 0 ? indices[node - 1] : 0;
    const CeedInt  ind_d          = indices[node];
    const CeedInt  ind_du         = loc_node < (RSTR_ELEM_SIZE - 1) ? indices[node + 1] : 0;
    const CeedInt8 curl_orient_dl = curl_orients[3 * node + 0];
    const CeedInt8 curl_orient_d  = curl_orients[3 * node + 1];
    const CeedInt8 curl_orient_du = curl_orients[3 * node + 2];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      CeedScalar value = 0.0;

      value += loc_node > 0 ? u[ind_dl + comp * RSTR_COMP_STRIDE] * curl_orient_dl : 0.0;
      value += u[ind_d + comp * RSTR_COMP_STRIDE] * curl_orient_d;
      value += loc_node < (RSTR_ELEM_SIZE - 1) ? u[ind_du + comp * RSTR_COMP_STRIDE] * curl_orient_du : 0.0;
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = value;
    }
  }
}
36
37 //------------------------------------------------------------------------------
38 // L-vector -> E-vector, unsigned curl-oriented
39 //------------------------------------------------------------------------------
// Scatter L-vector u into E-vector v, unsigned variant: identical to
// CurlOrientedNoTranspose except the sign of each curl_orients coefficient is
// discarded with abs(), so only the magnitude of the transformation is applied.
// Grid-stride loop: one thread per (element, local node) pair.
extern "C" __global__ void CurlOrientedUnsignedNoTranspose(const CeedInt *__restrict__ indices, const CeedInt8 *__restrict__ curl_orients,
                                                           const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    // Neighbor L-vector indices; dummy 0 is masked by the matching predicate below
    const CeedInt  ind_dl         = loc_node > 0 ? indices[node - 1] : 0;
    const CeedInt  ind_d          = indices[node];
    const CeedInt  ind_du         = loc_node < (RSTR_ELEM_SIZE - 1) ? indices[node + 1] : 0;
    const CeedInt8 curl_orient_dl = abs(curl_orients[3 * node + 0]);
    const CeedInt8 curl_orient_d  = abs(curl_orients[3 * node + 1]);
    const CeedInt8 curl_orient_du = abs(curl_orients[3 * node + 2]);

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      CeedScalar value = 0.0;

      value += loc_node > 0 ? u[ind_dl + comp * RSTR_COMP_STRIDE] * curl_orient_dl : 0.0;
      value += u[ind_d + comp * RSTR_COMP_STRIDE] * curl_orient_d;
      value += loc_node < (RSTR_ELEM_SIZE - 1) ? u[ind_du + comp * RSTR_COMP_STRIDE] * curl_orient_du : 0.0;
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = value;
    }
  }
}
61
62 //------------------------------------------------------------------------------
63 // E-vector -> L-vector, curl-oriented
64 //------------------------------------------------------------------------------
65 #if !USE_DETERMINISTIC
// Gather E-vector u into L-vector v, applying the transpose of the curl-orientation
// transformation. For the transposed banded operator, the coefficient coupling this
// node to its lower E-vector neighbor is the previous node's super-diagonal entry
// ([3*node - 1]) and to its upper neighbor the next node's sub-diagonal ([3*node + 3]).
// Multiple elements may contribute to the same L-vector entry, so accumulation uses
// atomicAdd; summation order is nondeterministic (see USE_DETERMINISTIC alternative).
extern "C" __global__ void CurlOrientedTranspose(const CeedInt *__restrict__ indices, const CeedInt8 *__restrict__ curl_orients,
                                                 const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    // Integer 0 (not the double literal 0.0) masks the boundary coefficients; the
    // ternary otherwise promotes to double before narrowing back to CeedInt8
    const CeedInt8 curl_orient_du = loc_node > 0 ? curl_orients[3 * node - 1] : 0;
    const CeedInt8 curl_orient_d  = curl_orients[3 * node + 1];
    const CeedInt8 curl_orient_dl = loc_node < (RSTR_ELEM_SIZE - 1) ? curl_orients[3 * node + 3] : 0;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      CeedScalar value = 0.0;

      value += loc_node > 0 ? u[loc_node - 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_du : 0.0;
      value += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_d;
      value +=
          loc_node < (RSTR_ELEM_SIZE - 1) ? u[loc_node + 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_dl : 0.0;
      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], value);
    }
  }
}
86 #else
// Gather E-vector u into L-vector v, deterministic transpose variant.
// Each thread owns exactly one L-vector node i: t_offsets/t_indices form a CSR-style
// map from node i to every E-vector location contributing to it, so contributions are
// summed in a fixed order by a single thread and no atomics are required.
extern "C" __global__ void CurlOrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                                 const CeedInt *__restrict__ t_offsets, const CeedInt8 *__restrict__ curl_orients,
                                                 const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;
      // Transposed banded coefficients: previous node's super-diagonal and next node's
      // sub-diagonal; integer 0 (not 0.0) masks element-boundary terms
      const CeedInt8 curl_orient_du = loc_node > 0 ? curl_orients[3 * t_ind - 1] : 0;
      const CeedInt8 curl_orient_d  = curl_orients[3 * t_ind + 1];
      const CeedInt8 curl_orient_dl = loc_node < (RSTR_ELEM_SIZE - 1) ? curl_orients[3 * t_ind + 3] : 0;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += loc_node > 0 ? u[loc_node - 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_du : 0.0;
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_d;
        value[comp] +=
            loc_node < (RSTR_ELEM_SIZE - 1) ? u[loc_node + 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_dl : 0.0;
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
118 #endif
119
120 //------------------------------------------------------------------------------
121 // E-vector -> L-vector, unsigned curl-oriented
122 //------------------------------------------------------------------------------
123 #if !USE_DETERMINISTIC
// Gather E-vector u into L-vector v, unsigned transpose variant: identical to
// CurlOrientedTranspose except each coefficient's sign is discarded with abs().
// Accumulation uses atomicAdd since multiple elements may contribute to the same
// L-vector entry; summation order is nondeterministic.
extern "C" __global__ void CurlOrientedUnsignedTranspose(const CeedInt *__restrict__ indices, const CeedInt8 *__restrict__ curl_orients,
                                                         const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    const CeedInt ind      = indices[node];
    // Integer 0 (not the double literal 0.0) masks boundary coefficients of CeedInt8 type
    const CeedInt8 curl_orient_du = loc_node > 0 ? abs(curl_orients[3 * node - 1]) : 0;
    const CeedInt8 curl_orient_d  = abs(curl_orients[3 * node + 1]);
    const CeedInt8 curl_orient_dl = loc_node < (RSTR_ELEM_SIZE - 1) ? abs(curl_orients[3 * node + 3]) : 0;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      CeedScalar value = 0.0;

      value += loc_node > 0 ? u[loc_node - 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_du : 0.0;
      value += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_d;
      value +=
          loc_node < (RSTR_ELEM_SIZE - 1) ? u[loc_node + 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_dl : 0.0;
      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], value);
    }
  }
}
144 #else
// Gather E-vector u into L-vector v, deterministic unsigned transpose variant.
// Identical to the deterministic CurlOrientedTranspose except coefficient signs are
// discarded with abs(). Each thread owns one L-vector node via the t_offsets/t_indices
// CSR-style map, so contributions are summed in a fixed order without atomics.
extern "C" __global__ void CurlOrientedUnsignedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                                         const CeedInt *__restrict__ t_offsets, const CeedInt8 *__restrict__ curl_orients,
                                                         const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;
      // Integer 0 (not 0.0) masks element-boundary terms of CeedInt8 type
      const CeedInt8 curl_orient_du = loc_node > 0 ? abs(curl_orients[3 * t_ind - 1]) : 0;
      const CeedInt8 curl_orient_d  = abs(curl_orients[3 * t_ind + 1]);
      const CeedInt8 curl_orient_dl = loc_node < (RSTR_ELEM_SIZE - 1) ? abs(curl_orients[3 * t_ind + 3]) : 0;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += loc_node > 0 ? u[loc_node - 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_du : 0.0;
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_d;
        value[comp] +=
            loc_node < (RSTR_ELEM_SIZE - 1) ? u[loc_node + 1 + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * curl_orient_dl : 0.0;
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
176 #endif
177