// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA offset element restriction kernels
#ifndef CEED_CUDA_REF_RESTRICTION_OFFSET_H
#define CEED_CUDA_REF_RESTRICTION_OFFSET_H

#include <ceed.h>

// The RSTR_* constants (RSTR_NUM_ELEM, RSTR_ELEM_SIZE, RSTR_NUM_COMP, RSTR_COMP_STRIDE,
// RSTR_NUM_NODES) and USE_DETERMINISTIC are not defined in this header; the backend is
// expected to define them when these kernels are JIT-compiled.

//------------------------------------------------------------------------------
// L-vector -> E-vector, standard (with offsets)
//------------------------------------------------------------------------------
extern "C" __global__ void OffsetNoTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all nodes of all elements
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];          // L-vector index for this E-vector node
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;  // node number within the element
    const CeedInt elem     = node / RSTR_ELEM_SIZE;  // element number

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = u[ind + comp * RSTR_COMP_STRIDE];
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all nodes of all elements; several E-vector nodes may
  // map to the same L-vector entry, so contributions are summed with atomics
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(v + ind + comp * RSTR_COMP_STRIDE, u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
    }
  }
}
#else
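// Deterministic variant: atomicAdd leaves the order of summation, and hence the
// floating-point rounding, run-dependent. Here each thread instead owns one
// unique L-vector node and accumulates all of its E-vector contributions in a
// fixed order, using a CSR-style transpose map: for L-vector node i, the entries
// t_offsets[i] <= j < t_offsets[i + 1] of t_indices list the E-vector nodes that
// reference it.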
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                           const CeedInt *__restrict__ t_offsets, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  // Grid-stride loop over the unique L-vector nodes touched by this restriction
  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    // Sum every E-vector contribution to this L-vector node in a fixed order
    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif

//------------------------------------------------------------------------------

#endif  // CEED_CUDA_REF_RESTRICTION_OFFSET_H
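// A minimal launch sketch, assuming the RSTR_* constants have been baked in at
// JIT time; d_indices, d_u, and d_v are hypothetical, suitably sized device
// pointers, and the block size is illustrative. Any grid size works because the
// kernels use grid-stride loops:
//
//   const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
//   const int     block     = 256;
//   const int     grid      = (num_nodes + block - 1) / block;
//   OffsetNoTranspose<<<grid, block>>>(d_indices, d_u, d_v);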