// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA offset element restriction kernels

#include <ceed.h>

//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
// Non-deterministic transpose: a grid-stride loop assigns one thread to each
// (element, local point) slot of the E-vector and scatters its contribution
// into the L-vector with atomicAdd, so the summation order may vary run to run.
extern "C" __global__ void AtPointsTranspose(const CeedInt *__restrict__ indices, const CeedInt *__restrict__ points_per_elem,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  const CeedInt stride = blockDim.x * gridDim.x;

  for (CeedInt n = blockIdx.x * blockDim.x + threadIdx.x; n < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; n += stride) {
    const CeedInt elem      = n / RSTR_ELEM_SIZE;
    const CeedInt loc_node  = n % RSTR_ELEM_SIZE;
    const CeedInt l_vec_ind = indices[n];

    // Slots past the element's actual point count are padding; skip them
    if (loc_node < points_per_elem[elem]) {
      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        atomicAdd(&v[l_vec_ind + comp * RSTR_COMP_STRIDE], u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
      }
    }
  }
}
#else
// Deterministic transpose: each thread owns one L-vector node and gathers all
// of its E-vector contributions through the transpose index/offset arrays, so
// the accumulation order is fixed and results are reproducible.
extern "C" __global__ void AtPointsTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                             const CeedInt *__restrict__ points_per_elem, const CeedInt *__restrict__ t_offsets,
                                             const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar    sum[RSTR_NUM_COMP];
  const CeedInt stride = blockDim.x * gridDim.x;

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += stride) {
    const CeedInt l_vec_ind = l_vec_indices[i];
    const CeedInt start     = t_offsets[i];
    const CeedInt stop      = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) sum[comp] = 0.0;

    // Accumulate every E-vector entry that maps to this L-vector node
    for (CeedInt j = start; j < stop; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;

      // Slots past the element's actual point count are padding; skip them
      if (loc_node < points_per_elem[elem]) {
        for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
          sum[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
        }
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[l_vec_ind + comp * RSTR_COMP_STRIDE] += sum[comp];
  }
}
#endif