xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-restriction-oriented.h (revision cf8cbdd67b26059bcd4f184d0db9bf05297bc21d)
1*cf8cbdd6SSebastian Grimberg // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2*cf8cbdd6SSebastian Grimberg // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3*cf8cbdd6SSebastian Grimberg //
4*cf8cbdd6SSebastian Grimberg // SPDX-License-Identifier: BSD-2-Clause
5*cf8cbdd6SSebastian Grimberg //
6*cf8cbdd6SSebastian Grimberg // This file is part of CEED:  http://github.com/ceed
7*cf8cbdd6SSebastian Grimberg 
8*cf8cbdd6SSebastian Grimberg /// @file
9*cf8cbdd6SSebastian Grimberg /// Internal header for CUDA oriented element restriction kernels
10*cf8cbdd6SSebastian Grimberg #ifndef CEED_CUDA_REF_RESTRICTION_ORIENTED_H
11*cf8cbdd6SSebastian Grimberg #define CEED_CUDA_REF_RESTRICTION_ORIENTED_H
12*cf8cbdd6SSebastian Grimberg 
13*cf8cbdd6SSebastian Grimberg #include <ceed.h>
14*cf8cbdd6SSebastian Grimberg 
15*cf8cbdd6SSebastian Grimberg //------------------------------------------------------------------------------
16*cf8cbdd6SSebastian Grimberg // L-vector -> E-vector, oriented
17*cf8cbdd6SSebastian Grimberg //------------------------------------------------------------------------------
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Scatter L-vector entries into the E-vector, negating any node whose
  // orientation flag is set. One thread handles one (element, local node)
  // pair; the grid-stride loop makes the kernel correct for any launch
  // configuration. All RSTR_* sizes are injected at JIT compile time.
  const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;

  for (CeedInt n = blockIdx.x * blockDim.x + threadIdx.x; n < num_nodes; n += blockDim.x * gridDim.x) {
    const CeedInt    elem     = n / RSTR_ELEM_SIZE;
    const CeedInt    loc_node = n - elem * RSTR_ELEM_SIZE;  // == n % RSTR_ELEM_SIZE
    const CeedInt    l_index  = indices[n];
    const CeedScalar sign     = orients[n] ? -1.0 : 1.0;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = sign * u[l_index + comp * RSTR_COMP_STRIDE];
    }
  }
}
31*cf8cbdd6SSebastian Grimberg 
32*cf8cbdd6SSebastian Grimberg //------------------------------------------------------------------------------
33*cf8cbdd6SSebastian Grimberg // E-vector -> L-vector, oriented
34*cf8cbdd6SSebastian Grimberg //------------------------------------------------------------------------------
35*cf8cbdd6SSebastian Grimberg #if !USE_DETERMINISTIC
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  // Gather E-vector entries back into the L-vector, applying the per-node
  // orientation sign. Several E-vector nodes may map to the same L-vector
  // entry, so contributions are combined with atomicAdd — summation order is
  // therefore non-deterministic across launches.
  // NOTE(review): atomicAdd on double needs SM60+ when CeedScalar is double.
  const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;

  for (CeedInt n = blockIdx.x * blockDim.x + threadIdx.x; n < num_nodes; n += blockDim.x * gridDim.x) {
    const CeedInt    elem     = n / RSTR_ELEM_SIZE;
    const CeedInt    loc_node = n - elem * RSTR_ELEM_SIZE;  // == n % RSTR_ELEM_SIZE
    const CeedInt    l_index  = indices[n];
    const CeedScalar sign     = orients[n] ? -1.0 : 1.0;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(&v[l_index + comp * RSTR_COMP_STRIDE], sign * u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
    }
  }
}
50*cf8cbdd6SSebastian Grimberg #else
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Deterministic E-vector -> L-vector gather: each thread owns a single
  // L-vector node and serially accumulates every E-vector entry mapping to it.
  // The transpose map is given in offsets/indices (CSR-like) form via
  // t_offsets/t_indices, so the summation order is fixed and no atomics are
  // needed. Per-component partial sums are kept in registers.
  CeedScalar sum[RSTR_NUM_COMP];

  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_NODES; node += blockDim.x * gridDim.x) {
    const CeedInt l_index   = l_vec_indices[node];
    const CeedInt row_start = t_offsets[node];
    const CeedInt row_end   = t_offsets[node + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) sum[comp] = 0.0;

    // Walk the row of E-vector locations contributing to this L-vector node.
    for (CeedInt j = row_start; j < row_end; j++) {
      const CeedInt    e_node   = t_indices[j];
      const CeedInt    elem     = e_node / RSTR_ELEM_SIZE;
      const CeedInt    loc_node = e_node - elem * RSTR_ELEM_SIZE;  // == e_node % RSTR_ELEM_SIZE
      const CeedScalar sign     = orients[e_node] ? -1.0 : 1.0;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        sum[comp] += sign * u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[l_index + comp * RSTR_COMP_STRIDE] += sum[comp];
  }
}
77*cf8cbdd6SSebastian Grimberg #endif
78*cf8cbdd6SSebastian Grimberg 
79*cf8cbdd6SSebastian Grimberg //------------------------------------------------------------------------------
80*cf8cbdd6SSebastian Grimberg 
81*cf8cbdd6SSebastian Grimberg #endif  // CEED_CUDA_REF_RESTRICTION_ORIENTED_H
82