// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA oriented element restriction kernels
///
/// These kernels are compiled with the following macros supplied at JIT time:
///   RSTR_NUM_ELEM    - number of elements
///   RSTR_ELEM_SIZE   - number of nodes per element
///   RSTR_NUM_COMP    - number of components
///   RSTR_COMP_STRIDE - L-vector stride between components
///   RSTR_NUM_NODES   - number of unique L-vector nodes (deterministic path only)
///   USE_DETERMINISTIC- selects the deterministic transpose variant
#ifndef CEED_CUDA_REF_RESTRICTION_ORIENTED_H
#define CEED_CUDA_REF_RESTRICTION_ORIENTED_H

#include <ceed.h>

//------------------------------------------------------------------------------
// L-vector -> E-vector, oriented
//
// Scatters L-vector values to E-vector nodes, flipping the sign of any node
// whose orientation flag is true.
//
// Launch: 1D grid-stride loop over RSTR_NUM_ELEM * RSTR_ELEM_SIZE nodes; any
// 1D grid/block configuration is valid. No shared memory required.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Negate instead of multiplying by the double literal -1.0 so no
      // double-precision arithmetic is introduced when CeedScalar is float
      const CeedScalar value = u[ind + comp * RSTR_COMP_STRIDE];

      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = orient ? -value : value;
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, oriented
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
//------------------------------------------------------------------------------
// Non-deterministic variant: gathers E-vector values into the L-vector with
// atomicAdd, flipping signs per the orientation flags. Summation order is
// unspecified, so floating-point results may vary run to run.
//
// Launch: 1D grid-stride loop over RSTR_NUM_ELEM * RSTR_ELEM_SIZE nodes.
// NOTE(review): atomicAdd on double requires SM60+ when CeedScalar is double.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Conditional negation avoids a double-precision multiply (and a
      // double -> float narrowing in the atomicAdd argument) for float builds
      const CeedScalar value = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

      atomicAdd(v + ind + comp * RSTR_COMP_STRIDE, orient ? -value : value);
    }
  }
}
#else
//------------------------------------------------------------------------------
// Deterministic variant: one thread owns each unique L-vector node i.
// t_offsets[i]..t_offsets[i+1] delimits, in t_indices, the list of E-vector
// nodes contributing to that L-vector node, so each thread accumulates its
// contributions serially in a fixed order — no atomics, reproducible results.
//
// Launch: 1D grid-stride loop over RSTR_NUM_NODES. t_offsets must have
// RSTR_NUM_NODES + 1 entries.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];  // per-thread accumulator, one slot per component

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const bool    orient   = orients[t_ind];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        // Negate instead of multiplying by the double literal -1.0 so no
        // double-precision arithmetic is introduced when CeedScalar is float
        const CeedScalar value_j = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

        value[comp] += orient ? -value_j : value_j;
      }
    }

    // v is read-modify-written, not overwritten; each i maps to a distinct
    // l_vec_indices[i], so no synchronization is needed across threads
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif

//------------------------------------------------------------------------------

#endif  // CEED_CUDA_REF_RESTRICTION_ORIENTED_H