xref: /libCEED/include/ceed/jit-source/magma/magma-basis-interp-1d.h (revision 715f9ba89a309f24226005ca1fbb9f59fe9eac68)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for MAGMA tensor basis interpolation in 1D
10 #ifndef CEED_MAGMA_BASIS_INTERP_1D_H
11 #define CEED_MAGMA_BASIS_INTERP_1D_H
12 
13 #include "magma-common-tensor.h"
14 
15 // macros to abstract access of shared memory and reg. file
16 #define sT(i, j) sT[(j)*P + (i)]
17 
18 //////////////////////////////////////////////////////////////////////////////////////////
19 // interp basis action (1D)
template <typename T, int DIM, int NUM_COMP, int P, int Q>
static __device__ __inline__ void magma_interp_1d_device(const T *sT, magma_trans_t transT, T *sU[NUM_COMP], T *sV[NUM_COMP], const int tx) {
  // Assumptions
  // 1. 1D threads of size max(P,Q)
  // 2. sU[i] is 1xP: in shared memory
  // 3. sV[i] is 1xQ: in shared memory
  // 4. Product per component is one row (1xP) times T matrix (PxQ) => one row (1xQ)
  // 5. Each thread computes one entry in sV[i]
  // 6. Must sync before and after call
  // 7. Note that the layout for U and V is different from 2D/3D problem

  T rv;
  if (tx < Q) {
    for (int comp = 0; comp < NUM_COMP; comp++) {
      // Transpose (accumulate) mode starts from the existing sV entry (V += op);
      // otherwise start from zero (V = op). Seed with a T-typed zero so the
      // accumulation chain is not silently promoted to double when T is float.
      rv = (transT == MagmaTrans) ? sV[comp][tx] : static_cast<T>(0);
      for (int i = 0; i < P; i++) {
        rv += sU[comp][i] * sT(i, tx);  // sT[tx * P + i];
      }
      sV[comp][tx] = rv;
    }
  }
}
42 
43 //////////////////////////////////////////////////////////////////////////////////////////
// Non-transpose 1D interpolation kernel: per element and per component,
// computes V = T * U where T is the P x Q interpolation matrix read from dT.
//
// Thread layout (as used below): threadIdx.x indexes entries within an
// element; threadIdx.y selects one of blockDim.y elements handled by the
// block; blockIdx.x * blockDim.y + threadIdx.y is the global element id.
// Shared memory layout: T (BASIS_P * BASIS_Q scalars), then one
// BASIS_NUM_COMP * (BASIS_P + BASIS_Q) scratch slice per ty for U and V.
// estrdU/estrdV are element strides, cstrdU/cstrdV are component strides.
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_interpn_1d_kernel(const CeedScalar *dT, const CeedScalar *dU, const int estrdU, const int cstrdU, CeedScalar *dV, const int estrdV,
                                 const int cstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int     tx      = threadIdx.x;
  const int     ty      = threadIdx.y;
  const int     elem_id = (blockIdx.x * blockDim.y) + ty;
  magma_trans_t transT  = MagmaNoTrans;

  // Whole ty-slices drop out together here (elem_id depends only on ty and
  // blockIdx), before the __syncthreads() calls below.
  // NOTE(review): a partial block at the grid tail therefore reaches the
  // barriers with fewer than blockDim.y slices — presumably relied upon as
  // safe by the host-side launch configuration; confirm against the launcher.
  if (elem_id >= nelem) return;

  // Per-component pointers into the shared-memory scratch slices.
  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers: sT first, then this ty's U/V scratch (sW)
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_P * BASIS_Q;
  sU[0]          = sW + ty * BASIS_NUM_COMP * (BASIS_P + BASIS_Q);
  sV[0]          = sU[0] + (BASIS_NUM_COMP * 1 * BASIS_P);
  for (int comp = 1; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = sU[comp - 1] + (1 * BASIS_P);
    sV[comp] = sV[comp - 1] + (1 * BASIS_Q);
  }

  // read T: only the ty == 0 slice loads it, since sT is shared block-wide
  if (ty == 0) {
    dread_T_gm2sm<BASIS_P, BASIS_Q>(tx, transT, dT, sT);
  }

  // read U into shared memory (one component stride apart in global memory)
  read_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  // barrier before compute: sT and sU must be fully populated first
  __syncthreads();
  magma_interp_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_P, BASIS_Q>(sT, transT, sU, sV, tx);
  __syncthreads();

  // write V
  write_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}
88 
89 //////////////////////////////////////////////////////////////////////////////////////////
// Transpose 1D interpolation kernel: per element and per component,
// accumulates V += T^t * U (the device routine seeds the sum with the
// existing sV entry when transT == MagmaTrans), with T read as Q x P here.
//
// Thread layout (as used below): threadIdx.x indexes entries within an
// element; threadIdx.y selects one of blockDim.y elements handled by the
// block; blockIdx.x * blockDim.y + threadIdx.y is the global element id.
// Shared memory layout: T (BASIS_Q * BASIS_P scalars), then one
// BASIS_NUM_COMP * (BASIS_Q + BASIS_P) scratch slice per ty for U and V.
// estrdU/estrdV are element strides, cstrdU/cstrdV are component strides.
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_interpt_1d_kernel(const CeedScalar *dT, const CeedScalar *dU, const int estrdU, const int cstrdU, CeedScalar *dV, const int estrdV,
                                 const int cstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int     tx      = threadIdx.x;
  const int     ty      = threadIdx.y;
  const int     elem_id = (blockIdx.x * blockDim.y) + ty;
  magma_trans_t transT  = MagmaTrans;

  // Whole ty-slices drop out together here (elem_id depends only on ty and
  // blockIdx), before the __syncthreads() calls below.
  // NOTE(review): a partial block at the grid tail therefore reaches the
  // barriers with fewer than blockDim.y slices — presumably relied upon as
  // safe by the host-side launch configuration; confirm against the launcher.
  if (elem_id >= nelem) return;

  // Per-component pointers into the shared-memory scratch slices.
  // Note the swapped extents vs. the non-transpose kernel: U is 1xQ, V is 1xP.
  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];

  // shift global memory pointers by elem stride
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // assign shared memory pointers: sT first, then this ty's U/V scratch (sW)
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_Q * BASIS_P;
  sU[0]          = sW + ty * BASIS_NUM_COMP * (BASIS_Q + BASIS_P);
  sV[0]          = sU[0] + (BASIS_NUM_COMP * 1 * BASIS_Q);
  for (int comp = 1; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = sU[comp - 1] + (1 * BASIS_Q);
    sV[comp] = sV[comp - 1] + (1 * BASIS_P);
  }

  // read T: only the ty == 0 slice loads it, since sT is shared block-wide
  if (ty == 0) {
    dread_T_gm2sm<BASIS_Q, BASIS_P>(tx, transT, dT, sT);
  }

  // read U into shared memory (one component stride apart in global memory)
  read_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  // read V too: the transpose path accumulates into the existing V values
  read_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(dV, cstrdV, sV, tx);

  // barrier before compute: sT, sU, and sV must be fully populated first
  __syncthreads();
  magma_interp_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_Q, BASIS_P>(sT, transT, sU, sV, tx);
  __syncthreads();

  // write V
  write_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}
137 
138 #endif  // CEED_MAGMA_BASIS_INTERP_1D_H
139