// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for CUDA strided element restriction kernels
#ifndef CEED_CUDA_REF_RESTRICTION_STRIDED_H
#define CEED_CUDA_REF_RESTRICTION_STRIDED_H

#include <ceed.h>
//------------------------------------------------------------------------------
// L-vector -> E-vector, strided
//
// Grid-stride kernel: any 1D launch configuration is valid. One loop iteration
// handles one (element, local node) pair across all components. Layouts are
// fixed at JIT time via the RSTR_* macros: the E-vector `v` stores nodes
// contiguously within an element, elements within a component, components
// outermost; the L-vector `u` is addressed by the compile-time strides.
//------------------------------------------------------------------------------
extern "C" __global__ void StridedNoTranspose(const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt num_nodes   = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
  const CeedInt grid_stride = blockDim.x * gridDim.x;

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nodes; i += grid_stride) {
    const CeedInt n = i % RSTR_ELEM_SIZE;  // local node within the element
    const CeedInt e = i / RSTR_ELEM_SIZE;  // element index

    for (CeedInt c = 0; c < RSTR_NUM_COMP; c++) {
      const CeedScalar value = u[n * RSTR_STRIDE_NODES + c * RSTR_STRIDE_COMP + e * RSTR_STRIDE_ELEM];

      v[n + e * RSTR_ELEM_SIZE + c * RSTR_ELEM_SIZE * RSTR_NUM_ELEM] = value;
    }
  }
}
29*cf8cbdd6SSebastian Grimberg 
//------------------------------------------------------------------------------
// E-vector -> L-vector, strided
//
// Grid-stride kernel: any 1D launch configuration is valid. Each iteration
// accumulates (+=) one (element, local node) pair's values into the strided
// L-vector `v` for every component, so `v` must hold the desired initial
// values (typically zero) before launch. E-vector layout of `u` matches
// StridedNoTranspose: node fastest, then element, components outermost.
//------------------------------------------------------------------------------
extern "C" __global__ void StridedTranspose(const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt num_nodes   = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
  const CeedInt grid_stride = blockDim.x * gridDim.x;

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nodes; i += grid_stride) {
    const CeedInt n = i % RSTR_ELEM_SIZE;  // local node within the element
    const CeedInt e = i / RSTR_ELEM_SIZE;  // element index

    for (CeedInt c = 0; c < RSTR_NUM_COMP; c++) {
      const CeedScalar value = u[n + e * RSTR_ELEM_SIZE + c * RSTR_ELEM_SIZE * RSTR_NUM_ELEM];

      v[n * RSTR_STRIDE_NODES + c * RSTR_STRIDE_COMP + e * RSTR_STRIDE_ELEM] += value;
    }
  }
}
44*cf8cbdd6SSebastian Grimberg 
//------------------------------------------------------------------------------

#endif  // CEED_CUDA_REF_RESTRICTION_STRIDED_H