// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA oriented element restriction kernels

#include <ceed.h>

//------------------------------------------------------------------------------
// L-vector -> E-vector, oriented
//
// Scatters L-vector values into the E-vector, negating any node whose
// orientation flag is set. Launched 1-D; the grid-stride loop over all
// RSTR_NUM_ELEM * RSTR_ELEM_SIZE element nodes makes the kernel correct for
// any grid/block configuration. The RSTR_* constants are expected to be
// defined at compile time — TODO confirm they are injected by the JIT driver.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedNoTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients,
                                               const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Negate directly rather than multiplying by a bare double literal so
      // the arithmetic stays in CeedScalar precision when CeedScalar is float
      // (result is bit-identical: IEEE -1.0 * x == -x)
      const CeedScalar value = u[ind + comp * RSTR_COMP_STRIDE];

      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = orient ? -value : value;
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, oriented
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
//------------------------------------------------------------------------------
// Non-deterministic gather: several E-vector nodes may map to the same
// L-vector entry, so contributions are combined with atomicAdd and the
// floating-point summation order is not reproducible between runs.
// NOTE(review): atomicAdd on double requires SM60+ when CeedScalar is double.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ indices, const bool *__restrict__ orients, const CeedScalar *__restrict__ u,
                                             CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const bool    orient   = orients[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      // Apply the orientation sign via negation (see note in OrientedNoTranspose)
      const CeedScalar value = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], orient ? -value : value);
    }
  }
}
#else
//------------------------------------------------------------------------------
// Deterministic gather: each thread owns one L-vector node and serially
// accumulates all of that node's E-vector contributions through the transpose
// index arrays (t_offsets holds CSR-style [start, end) ranges into t_indices),
// so no atomics are needed and summation order is fixed.
//------------------------------------------------------------------------------
extern "C" __global__ void OrientedTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ t_offsets, const bool *__restrict__ orients,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  // Per-component accumulator, kept in registers across the contribution loop
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

    // Sum every E-vector node that restricts to this L-vector node
    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const bool    orient   = orients[t_ind];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        // Apply the orientation sign via negation (see note in OrientedNoTranspose)
        const CeedScalar e_value = u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];

        value[comp] += orient ? -e_value : e_value;
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif