xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-restriction-oriented.h (revision d06a2c125c427f0694fb06710c5ee86c8219d1b1)
1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for CUDA oriented element restriction kernels
10 
11 #include <ceed.h>
12 
13 //------------------------------------------------------------------------------
14 // L-vector -> E-vector, oriented
15 //------------------------------------------------------------------------------
// Scatter an L-vector into an E-vector, applying orientations.
//
// One (element, node) pair per thread via a grid-stride loop, so any launch
// configuration covers all RSTR_NUM_ELEM * RSTR_ELEM_SIZE nodes.
//
// indices - per E-vector node, the L-vector DoF index to read
// orients - per E-vector node, true flips the sign of the copied value
// u       - input L-vector (components strided by RSTR_COMP_STRIDE)
// v       - output E-vector
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Negate instead of multiplying by the double literal -1.0 so
      // single-precision CeedScalar builds avoid implicit promotion to double
      const CeedScalar value = u[ind + comp * RSTR_COMP_STRIDE];

      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = orient ? -value : value;
    }
  }
}
29 
30 //------------------------------------------------------------------------------
31 // E-vector -> L-vector, oriented
32 //------------------------------------------------------------------------------
33 #if !USE_DETERMINISTIC
// Gather an E-vector into an L-vector, applying orientations (non-deterministic).
//
// Multiple E-vector nodes may map to the same L-vector DoF, so contributions
// are combined with atomicAdd; the summation order is therefore not
// reproducible between runs. Note: atomicAdd on double requires SM60+ when
// CeedScalar is double precision.
//
// indices - per E-vector node, the L-vector DoF index to accumulate into
// orients - per E-vector node, true flips the sign of the contribution
// u       - input E-vector
// v       - output L-vector (accumulated into, not overwritten)
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Negate instead of multiplying by the double literal -1.0 so
      // single-precision CeedScalar builds avoid implicit promotion to double
      const CeedScalar value = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], orient ? -value : value);
    }
  }
}
48 #else
// Gather an E-vector into an L-vector, applying orientations (deterministic).
//
// Each thread owns one unique L-vector node (RSTR_NUM_NODES total). The
// transpose arrays give, in offset-range (CSR-style) form, every E-vector
// node contributing to that L-vector node, so each sum is accumulated by a
// single thread in a fixed order — no atomics, reproducible results.
//
// l_vec_indices - per owned node, the L-vector DoF index to accumulate into
// t_indices     - E-vector node indices, grouped by owning L-vector node
// t_offsets     - start offsets into t_indices (length RSTR_NUM_NODES + 1)
// orients       - per E-vector node, true flips the sign of the contribution
// u             - input E-vector
// v             - output L-vector (accumulated into, not overwritten)
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Per-thread accumulator, one slot per component
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];      // first contribution for this node
    const CeedInt range_N = t_offsets[i + 1];  // one past the last contribution

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const bool    orient   = orients[t_ind];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        // Negate instead of multiplying by the double literal -1.0 so
        // single-precision CeedScalar builds avoid implicit promotion to double
        const CeedScalar u_value = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

        value[comp] += orient ? -u_value : u_value;
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
75 #endif
76