// Copyright (c) 2017-2026, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA oriented element restriction kernels
#include <ceed/types.h>

//------------------------------------------------------------------------------
// L-vector -> E-vector, oriented
//------------------------------------------------------------------------------
// Scatter an L-vector into an E-vector with orientation sign flips.
// Grid-stride loop over all (element, local node) pairs; a true entry in
// orients negates the value copied for that node (every component).
// Expects a 1D launch; any grid/block size is valid.
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt stride = blockDim.x * gridDim.x;

  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += stride) {
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    const CeedInt loc_node = node - elem * RSTR_ELEM_SIZE;
    const CeedInt ind      = indices[node];
    const bool    flip     = orients[node];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Read the L-vector entry for this component, then write it (possibly
      // negated) into the component-strided E-vector layout.
      const CeedScalar u_comp = u[ind + comp * RSTR_COMP_STRIDE];

      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = flip ? -u_comp : u_comp;
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, oriented
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
// Gather-add an E-vector back into an L-vector with orientation sign flips.
// Grid-stride loop over all (element, local node) pairs; duplicate L-vector
// targets are resolved with atomicAdd, so the summation order (and thus the
// floating-point rounding) is non-deterministic across runs.
// NOTE(review): atomicAdd on double needs SM60+ — relevant if CeedScalar is double.
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  const CeedInt stride = blockDim.x * gridDim.x;

  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += stride) {
    const CeedInt elem     = node / RSTR_ELEM_SIZE;
    const CeedInt loc_node = node - elem * RSTR_ELEM_SIZE;
    const CeedInt ind      = indices[node];
    const bool    flip     = orients[node];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      const CeedScalar e_comp = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], flip ? -e_comp : e_comp);
    }
  }
}
#else
// Deterministic E-vector -> L-vector gather-add with orientation sign flips.
// One thread owns one unique L-vector node: t_offsets[i]..t_offsets[i+1]
// delimits the E-vector entries that map to it (via t_indices), so each sum
// is formed in a fixed order by a single thread — no atomics needed.
// Expects a 1D grid-stride launch over RSTR_NUM_NODES unique nodes.
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Per-thread accumulator, one slot per component (lives in registers/local).
  CeedScalar    accum[RSTR_NUM_COMP];
  const CeedInt stride = blockDim.x * gridDim.x;

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += stride) {
    const CeedInt first = t_offsets[i];
    const CeedInt last  = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) accum[comp] = 0.0;

    // Sum every E-vector contribution for this unique node, in index order.
    for (CeedInt j = first; j < last; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;
      const CeedInt loc_node = t_ind - elem * RSTR_ELEM_SIZE;
      const bool    flip     = orients[t_ind];

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        const CeedScalar e_comp = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

        accum[comp] += flip ? -e_comp : e_comp;
      }
    }

    const CeedInt ind = l_vec_indices[i];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += accum[comp];
  }
}
#endif