// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA non-tensor product basis

#include <ceed.h>

#include "cuda-ref-basis-nontensor-templates.h"

//------------------------------------------------------------------------------
// Non-Tensor Basis Kernels
//
// NOTE(review): the indexing below implies threadIdx.z enumerates elements
// within a block, so every kernel walks elements with a grid-stride loop of
// stride gridDim.x * blockDim.z — confirm against the host launch code.
// Stride arguments passed to Contract/ContractTranspose are multiples of
// num_elem, i.e. element index varies fastest in the data layout.
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------
// Interp
//------------------------------------------------------------------------------
// Apply the basis matrix d_B (P x Q per component, per the template
// parameters) to nodal values d_U, producing quadrature-point values d_V.
extern "C" __global__ void Interp(const CeedInt num_elem, const CeedScalar *__restrict__ d_B, const CeedScalar *__restrict__ d_U,
                                  CeedScalar *__restrict__ d_V) {
  const CeedInt elem_start  = blockIdx.x * blockDim.z + threadIdx.z;
  const CeedInt elem_stride = gridDim.x * blockDim.z;
  // Strides handed to Contract: input (P-sized), output (Q-sized), and
  // output-component strides, all in units of num_elem.
  const CeedInt u_stride = BASIS_P * num_elem;
  const CeedInt v_stride = BASIS_Q * num_elem;
  const CeedInt c_stride = BASIS_NUM_COMP * BASIS_Q * num_elem;

  for (CeedInt e = elem_start; e < num_elem; e += elem_stride) {
    Contract<BASIS_NUM_COMP, BASIS_Q_COMP_INTERP, BASIS_P, BASIS_Q>(e, BASIS_P, BASIS_Q, u_stride, v_stride, c_stride, d_B, d_U, d_V);
  }
}

// Transpose action of Interp: quadrature-point values d_U back to nodal
// values d_V (P/Q roles and strides are swapped relative to Interp).
extern "C" __global__ void InterpTranspose(const CeedInt num_elem, const CeedScalar *__restrict__ d_B, const CeedScalar *__restrict__ d_U,
                                           CeedScalar *__restrict__ d_V) {
  const CeedInt elem_start  = blockIdx.x * blockDim.z + threadIdx.z;
  const CeedInt elem_stride = gridDim.x * blockDim.z;
  const CeedInt u_stride    = BASIS_Q * num_elem;
  const CeedInt v_stride    = BASIS_P * num_elem;
  const CeedInt c_stride    = BASIS_NUM_COMP * BASIS_Q * num_elem;

  for (CeedInt e = elem_start; e < num_elem; e += elem_stride) {
    ContractTranspose<BASIS_NUM_COMP, BASIS_Q_COMP_INTERP, BASIS_P, BASIS_Q>(e, BASIS_Q, BASIS_P, u_stride, v_stride, c_stride, d_B, d_U, d_V);
  }
}

//------------------------------------------------------------------------------
// Deriv
//------------------------------------------------------------------------------
// Same contraction shape as Interp but instantiated with the derivative
// Q-component count (BASIS_Q_COMP_DERIV).
extern "C" __global__ void Deriv(const CeedInt num_elem, const CeedScalar *__restrict__ d_B, const CeedScalar *__restrict__ d_U,
                                 CeedScalar *__restrict__ d_V) {
  const CeedInt elem_start  = blockIdx.x * blockDim.z + threadIdx.z;
  const CeedInt elem_stride = gridDim.x * blockDim.z;
  const CeedInt u_stride    = BASIS_P * num_elem;
  const CeedInt v_stride    = BASIS_Q * num_elem;
  const CeedInt c_stride    = BASIS_NUM_COMP * BASIS_Q * num_elem;

  for (CeedInt e = elem_start; e < num_elem; e += elem_stride) {
    Contract<BASIS_NUM_COMP, BASIS_Q_COMP_DERIV, BASIS_P, BASIS_Q>(e, BASIS_P, BASIS_Q, u_stride, v_stride, c_stride, d_B, d_U, d_V);
  }
}

// Transpose action of Deriv (P/Q roles and strides swapped).
extern "C" __global__ void DerivTranspose(const CeedInt num_elem, const CeedScalar *__restrict__ d_B, const CeedScalar *__restrict__ d_U,
                                          CeedScalar *__restrict__ d_V) {
  const CeedInt elem_start  = blockIdx.x * blockDim.z + threadIdx.z;
  const CeedInt elem_stride = gridDim.x * blockDim.z;
  const CeedInt u_stride    = BASIS_Q * num_elem;
  const CeedInt v_stride    = BASIS_P * num_elem;
  const CeedInt c_stride    = BASIS_NUM_COMP * BASIS_Q * num_elem;

  for (CeedInt e = elem_start; e < num_elem; e += elem_stride) {
    ContractTranspose<BASIS_NUM_COMP, BASIS_Q_COMP_DERIV, BASIS_P, BASIS_Q>(e, BASIS_Q, BASIS_P, u_stride, v_stride, c_stride, d_B, d_U, d_V);
  }
}

//------------------------------------------------------------------------------
// Weight
//------------------------------------------------------------------------------
// Broadcast the quadrature weights into d_V for every element.
// NOTE(review): each thread writes one quadrature point, so this assumes
// blockDim.x == BASIS_Q — confirm against the host launch configuration.
extern "C" __global__ void Weight(const CeedInt num_elem, const CeedScalar *__restrict__ q_weight, CeedScalar *__restrict__ d_V) {
  const CeedInt q           = threadIdx.x;
  const CeedInt elem_start  = blockIdx.x * blockDim.z + threadIdx.z;
  const CeedInt elem_stride = gridDim.x * blockDim.z;
  // TODO load q_weight in shared memory if blockDim.z > 1?

  for (CeedInt e = elem_start; e < num_elem; e += elem_stride) {
    d_V[e * BASIS_Q + q] = q_weight[q];
  }
}