// Copyright (c) 2017-2026, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for HIP offset element restriction kernels
#include <ceed/types.h>

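// Note: the RSTR_* constants used below (RSTR_NUM_ELEM, RSTR_ELEM_SIZE, RSTR_NUM_COMP,
// RSTR_COMP_STRIDE, RSTR_NUM_NODES) and the USE_DETERMINISTIC switch are not defined in
// this header; they are expected to be supplied as compile-time definitions when these
// kernels are JIT-compiled for a specific element restriction.
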
//------------------------------------------------------------------------------
// L-vector -> E-vector, standard (with offsets)
//------------------------------------------------------------------------------
extern "C" __global__ void OffsetNoTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all nodes of all elements
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];          // L-vector node for this E-vector entry
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;  // node within the element
    const CeedInt elem     = node / RSTR_ELEM_SIZE;  // element index

    // Copy each component from the L-vector into the E-vector
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = u[ind + comp * RSTR_COMP_STRIDE];
    }
  }
}
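
// E-vector layout illustration (not part of the kernel): with, e.g., RSTR_ELEM_SIZE = 2,
// RSTR_NUM_ELEM = 2, and RSTR_NUM_COMP = 2, the entry for (elem = 1, loc_node = 0, comp = 1)
// lands at index 0 + 1 * 2 * 2 + 1 * 2 = 6, i.e. the E-vector is ordered component-major,
// then by element, then by node within the element.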

//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
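// This path uses HIP's floating-point atomicAdd for CeedScalar; because the order of
// accumulation over shared nodes is not fixed, results may differ between runs at the
// level of floating-point rounding.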
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all nodes of all elements
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];          // L-vector node receiving this E-vector entry
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;  // node within the element
    const CeedInt elem     = node / RSTR_ELEM_SIZE;  // element index

    // Accumulate each component into the L-vector; atomics handle nodes shared between elements
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
    }
  }
}
#else
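// Deterministic path: t_offsets is a CSR-style array of length RSTR_NUM_NODES + 1, and for
// L-vector node i the contributing E-vector locations are t_indices[t_offsets[i]] through
// t_indices[t_offsets[i + 1] - 1]. A node shared by two elements, for example, has two
// entries in that range, one per element.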
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                           const CeedInt *__restrict__ t_offsets, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  // Grid-stride loop over L-vector nodes; each node is handled by exactly one thread,
  // so the gather-and-sum below needs no atomics and is deterministic
  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];      // start of this node's E-vector entries
    const CeedInt range_N = t_offsets[i + 1];  // one past the end

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    // Sum all E-vector entries that map to this L-vector node
    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif