// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for CUDA offset element restriction kernels

11cf8cbdd6SSebastian Grimberg #include <ceed.h>
12cf8cbdd6SSebastian Grimberg 
//------------------------------------------------------------------------------
// L-vector -> E-vector, standard (with offsets)
//------------------------------------------------------------------------------
// Gather kernel: copy L-vector entries into the E-vector through the offset
// index array. One logical thread per (element, element-node) pair; the
// grid-stride loop makes any 1-D launch configuration cover all
// RSTR_NUM_ELEM * RSTR_ELEM_SIZE nodes.
extern "C" __global__ void OffsetNoTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
  const CeedInt stride    = blockDim.x * gridDim.x;

  for (CeedInt n = blockIdx.x * blockDim.x + threadIdx.x; n < num_nodes; n += stride) {
    const CeedInt elem       = n / RSTR_ELEM_SIZE;
    const CeedInt node_in_el = n % RSTR_ELEM_SIZE;
    const CeedInt l_vec_ind  = indices[n];

    // Each component occupies its own contiguous slab of the E-vector;
    // the L-vector strides components by RSTR_COMP_STRIDE.
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[node_in_el + elem * RSTR_ELEM_SIZE + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM] = u[l_vec_ind + comp * RSTR_COMP_STRIDE];
    }
  }
}
27cf8cbdd6SSebastian Grimberg 
//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
// Scatter-add kernel: accumulate E-vector values back into the L-vector with
// atomics. Multiple element nodes may map to the same L-vector entry, so the
// floating-point summation order depends on thread scheduling and the result
// is not bitwise reproducible between runs.
// NOTE(review): atomicAdd on a double-precision CeedScalar requires SM60+ —
// confirm the supported compute capabilities.
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
  const CeedInt stride    = blockDim.x * gridDim.x;

  for (CeedInt n = blockIdx.x * blockDim.x + threadIdx.x; n < num_nodes; n += stride) {
    const CeedInt elem       = n / RSTR_ELEM_SIZE;
    const CeedInt node_in_el = n % RSTR_ELEM_SIZE;
    const CeedInt l_vec_ind  = indices[n];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(&v[l_vec_ind + comp * RSTR_COMP_STRIDE], u[node_in_el + elem * RSTR_ELEM_SIZE + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM]);
    }
  }
}
#else
// Deterministic scatter-add kernel: each thread owns one unique L-vector node
// and serially sums every E-vector entry that maps to it, using the transpose
// index/offset arrays, so no atomics are needed and results are reproducible.
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                           const CeedInt *__restrict__ t_offsets, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar sum[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt l_vec_ind = l_vec_indices[i];
    // t_offsets[i] .. t_offsets[i + 1] delimits (CSR-style) the E-vector
    // entries contributing to L-vector node i.
    const CeedInt start = t_offsets[i];
    const CeedInt end   = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) sum[comp] = 0.0;

    for (CeedInt j = start; j < end; j++) {
      const CeedInt e_node     = t_indices[j];
      const CeedInt elem       = e_node / RSTR_ELEM_SIZE;
      const CeedInt node_in_el = e_node % RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        sum[comp] += u[node_in_el + elem * RSTR_ELEM_SIZE + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[l_vec_ind + comp * RSTR_COMP_STRIDE] += sum[comp];
  }
}
#endif
69