// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA oriented element restriction kernels
#include <ceed/types.h>

//------------------------------------------------------------------------------
// L-vector -> E-vector, oriented
//
// Each thread walks the element nodes in a grid-stride loop, gathers the
// L-vector value for every component, and flips the sign wherever the node
// orientation flag is true.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = u[ind + comp * RSTR_COMP_STRIDE] * (orient ? -1.0 : 1.0);
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, oriented
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
// Scatter-add with atomics: threads owning E-vector nodes that map to the same
// L-vector entry may race, so the summation order (and thus the floating-point
// rounding) is non-deterministic.
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE],
                u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * (orient ? -1.0 : 1.0));
    }
  }
}
#else
// Deterministic scatter-add: each thread owns one L-vector node and serially
// accumulates every E-vector contribution listed for it in the transpose
// indices, so no atomics are needed and the summation order is fixed.
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    // Sum the contributions of every E-vector node that maps to this L-vector node
    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const bool    orient   = orients[t_ind];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] * (orient ? -1.0 : 1.0);
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif
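
//------------------------------------------------------------------------------
// Illustrative usage (a sketch, not part of the library)
//
// The RSTR_* sizes (RSTR_NUM_ELEM, RSTR_ELEM_SIZE, RSTR_NUM_COMP,
// RSTR_COMP_STRIDE, RSTR_NUM_NODES) and USE_DETERMINISTIC are expected to be
// defined by whatever compiles this header; in libCEED they are injected when
// the kernels are JIT-compiled. The standalone harness below imitates that
// with plain #defines and is entirely hypothetical: two 1D line elements share
// node 1, element 1 sees its copy of that node with flipped orientation, and
// the gathered E-vector value for that copy is therefore negated.
//
//   #define RSTR_NUM_ELEM    2  // number of elements
//   #define RSTR_ELEM_SIZE   2  // nodes per element
//   #define RSTR_NUM_COMP    1  // components per node
//   #define RSTR_COMP_STRIDE 3  // L-vector stride between components
//   #include "cuda-ref-restriction-oriented.h"  // this file (path assumed)
//   #include <cstdio>
//
//   int main() {
//     const CeedInt    indices[4] = {0, 1, 1, 2};                 // L-vector index per E-vector node
//     const bool       orients[4] = {false, false, true, false};  // flip the shared node in element 1
//     const CeedScalar u[3]       = {10.0, 20.0, 30.0};           // L-vector
//     CeedInt    *d_indices;
//     bool       *d_orients;
//     CeedScalar *d_u, *d_v;
//     cudaMalloc(&d_indices, sizeof(indices));
//     cudaMalloc(&d_orients, sizeof(orients));
//     cudaMalloc(&d_u, sizeof(u));
//     cudaMalloc(&d_v, 4 * sizeof(CeedScalar));
//     cudaMemcpy(d_indices, indices, sizeof(indices), cudaMemcpyHostToDevice);
//     cudaMemcpy(d_orients, orients, sizeof(orients), cudaMemcpyHostToDevice);
//     cudaMemcpy(d_u, u, sizeof(u), cudaMemcpyHostToDevice);
//     OrientedNoTranspose<<<1, 4>>>(d_indices, d_orients, d_u, d_v);  // one thread per E-vector node
//     CeedScalar v[4];
//     cudaMemcpy(v, d_v, sizeof(v), cudaMemcpyDeviceToHost);
//     printf("%g %g %g %g\n", v[0], v[1], v[2], v[3]);  // prints: 10 20 -20 30
//     return 0;
//   }
//------------------------------------------------------------------------------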