// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA offset element restriction kernels
#ifndef CEED_CUDA_REF_RESTRICTION_OFFSET_H
#define CEED_CUDA_REF_RESTRICTION_OFFSET_H

#include <ceed.h>

//------------------------------------------------------------------------------
// L-vector -> E-vector, standard (with offsets)
//------------------------------------------------------------------------------
extern "C" __global__ void OffsetNoTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all element nodes (RSTR_NUM_ELEM * RSTR_ELEM_SIZE)
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    // Gather each component from its L-vector offset into the E-vector
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = u[ind + comp * RSTR_COMP_STRIDE];
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Grid-stride loop over all element nodes (RSTR_NUM_ELEM * RSTR_ELEM_SIZE)
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    // Scatter-add into the L-vector; atomics handle nodes shared between elements
    // (summation order, and thus rounding, is not deterministic)
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(v + ind + comp * RSTR_COMP_STRIDE, u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
    }
  }
}
#else
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                           const CeedInt *__restrict__ t_offsets, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  // Deterministic variant: each thread owns one L-vector node and accumulates
  // every E-vector contribution listed for it in the transposed (CSR-like)
  // index arrays, so no atomics are needed and the summation order is fixed
  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif

//------------------------------------------------------------------------------

#endif  // CEED_CUDA_REF_RESTRICTION_OFFSET_H
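
//------------------------------------------------------------------------------
// Illustrative launch sketch (not part of the backend; the d_* names are
// hypothetical device pointers). Assuming the RSTR_* and USE_DETERMINISTIC
// constants are provided at compile time (e.g. as -D defines or JIT compile
// options), a launch covering every element node might look like:
//
//   const CeedInt num_nodes = RSTR_NUM_ELEM * RSTR_ELEM_SIZE;
//   const int     block     = 256;
//   const int     grid      = (num_nodes + block - 1) / block;
//   OffsetNoTranspose<<<grid, block>>>(d_indices, d_u, d_v);
//
// Because the kernels use grid-stride loops, any positive grid size is valid;
// a smaller grid simply makes each thread process multiple nodes.
//------------------------------------------------------------------------------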