// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for MAGMA tensor basis interpolation in 1D

#include "magma-common-tensor.h"

// macro to abstract access of the shared-memory basis matrix:
// column-major access into the P x Q matrix sT
#define sT(i, j) sT[(j) * P + (i)]

////////////////////////////////////////////////////////////////////////////////
// interp basis action (1D)
//
// For every component, computes the row-vector/matrix product
//   sV[comp] (1xQ) = sU[comp] (1xP) * sT (PxQ)
//
// Assumptions
//  1. 1D threads of size max(P,Q)
//  2. sU[i] is 1xP: in shared memory
//  3. sV[i] is 1xQ: in shared memory
//  4. Product per component is one row (1xP) times T matrix (PxQ) => one row (1xQ)
//  5. Each thread computes one entry in sV[i]
//  6. Must sync before and after call
//  7. Note that the layout for U and V is different from the 2D/3D problem
template <typename T, int DIM, int NUM_COMP, int P, int Q>
static __device__ __inline__ void magma_interp_1d_device(const T *sT, T *sU[NUM_COMP], T *sV[NUM_COMP], const int tx) {
  // threads with tx >= Q have no output entry to compute
  if (tx < Q) {
    for (int comp = 0; comp < NUM_COMP; comp++) {
      T rv = 0.0;
      // dot product of row sU[comp] with column tx of sT
      for (int i = 0; i < P; i++) {
        rv += sU[comp][i] * sT(i, tx);  // sT(i, tx) == sT[tx * P + i]
      }
      sV[comp][tx] = rv;
    }
  }
}

////////////////////////////////////////////////////////////////////////////////
// Non-transpose interpolation kernel: V = interp(U).
// One element is processed per ty slice of the thread block; tx indexes the
// basis dimension. dT is the PxQ basis matrix; estrd*/cstrd* are the element
// and component strides of U and V in global memory.
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_interpn_1d_kernel(const CeedScalar *dT, const CeedScalar *dU, const int estrdU, const int cstrdU, CeedScalar *dV, const int estrdV,
                                 const int cstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  if (elem_id >= nelem) return;

  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers:
  // layout is [ T (PxQ) | per-ty workspace of NUM_COMP * (P + Q) ]
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_P * BASIS_Q;
  sU[0]          = sW + ty * BASIS_NUM_COMP * (BASIS_P + BASIS_Q);
  sV[0]          = sU[0] + (BASIS_NUM_COMP * 1 * BASIS_P);
  for (int comp = 1; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = sU[comp - 1] + (1 * BASIS_P);
    sV[comp] = sV[comp - 1] + (1 * BASIS_Q);
  }

  // read T -- only one ty slice loads the shared basis matrix
  if (ty == 0) {
    read_T_notrans_gm2sm<BASIS_P, BASIS_Q>(tx, dT, sT);
  }

  // read U
  read_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_interp_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_P, BASIS_Q>(sT, sU, sV, tx);
  __syncthreads();

  // write V
  write_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}

////////////////////////////////////////////////////////////////////////////////
// Transpose interpolation kernel: V = interp^T(U).
// Same structure as the non-transpose kernel with the roles of P and Q swapped
// and T read transposed.
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q,
MAGMA_MAXTHREADS_1D)) __global__
    void magma_interpt_1d_kernel(const CeedScalar *dT, const CeedScalar *dU, const int estrdU, const int cstrdU, CeedScalar *dV, const int estrdV,
                                 const int cstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  if (elem_id >= nelem) return;

  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers:
  // layout is [ T (QxP, transposed) | per-ty workspace of NUM_COMP * (Q + P) ]
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_Q * BASIS_P;
  sU[0]          = sW + ty * BASIS_NUM_COMP * (BASIS_Q + BASIS_P);
  sV[0]          = sU[0] + (BASIS_NUM_COMP * 1 * BASIS_Q);
  for (int comp = 1; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = sU[comp - 1] + (1 * BASIS_Q);
    sV[comp] = sV[comp - 1] + (1 * BASIS_P);
  }

  // read T (transposed) -- only one ty slice loads the shared basis matrix
  if (ty == 0) {
    read_T_trans_gm2sm<BASIS_Q, BASIS_P>(tx, dT, sT);
  }

  // read U (quadrature-sized in the transpose direction)
  read_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_interp_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_Q, BASIS_P>(sT, sU, sV, tx);
  __syncthreads();

  // write V
  write_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}

////////////////////////////////////////////////////////////////////////////////
// Transpose interpolation kernel with accumulation: V += interp^T(U).
// Identical to magma_interpt_1d_kernel except the result is summed into V
// instead of overwriting it.
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_interpta_1d_kernel(const CeedScalar *dT, const CeedScalar *dU, const int estrdU, const int cstrdU, CeedScalar *dV, const int estrdV,
                                  const int cstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  if
(elem_id >= nelem) return;

  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers:
  // layout is [ T (QxP, transposed) | per-ty workspace of NUM_COMP * (Q + P) ]
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_Q * BASIS_P;
  sU[0]          = sW + ty * BASIS_NUM_COMP * (BASIS_Q + BASIS_P);
  sV[0]          = sU[0] + (BASIS_NUM_COMP * 1 * BASIS_Q);
  for (int comp = 1; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = sU[comp - 1] + (1 * BASIS_Q);
    sV[comp] = sV[comp - 1] + (1 * BASIS_P);
  }

  // read T (transposed) -- only one ty slice loads the shared basis matrix
  if (ty == 0) {
    read_T_trans_gm2sm<BASIS_Q, BASIS_P>(tx, dT, sT);
  }

  // read U
  read_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_interp_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_Q, BASIS_P>(sT, sU, sV, tx);
  __syncthreads();

  // sum into V -- accumulate rather than overwrite (the only difference from
  // magma_interpt_1d_kernel)
  sum_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}