// Copyright (c) 2017-2026, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for MAGMA tensor basis gradient in 1D
#include "magma-common-tensor.h"

// macro to abstract access of the shared-memory copy of the basis matrix
#define sT(i, j) sT[(j) * P + (i)]

////////////////////////////////////////////////////////////////////////////////
// grad basis action (1D)
// Contract (per the launch setup in the kernels below):
//   - called by a 1D slab of threads of size max(P,Q); only tx < Q produce output
//   - sU[c] is a 1xP row in shared memory, sV[c] a 1xQ row in shared memory
//   - per component: one row (1xP) times the PxQ matrix sT => one row (1xQ),
//     i.e. sV[c][tx] = sum_k sU[c][k] * sT(k, tx), one entry per thread
//   - caller must __syncthreads() before and after this call (none inside)
//   - note the U/V layout differs from the 2D/3D kernels
template <typename T, int DIM, int NUM_COMP, int P, int Q>
static __device__ __inline__ void magma_grad_1d_device(const T *sT, T *sU[NUM_COMP], T *sV[NUM_COMP], const int tx) {
  // threads beyond the Q output entries have no work
  if (tx >= Q) return;

  for (int comp = 0; comp < NUM_COMP; comp++) {
    T dot = 0.0;
    for (int k = 0; k < P; k++) {
      dot += sU[comp][k] * sT(k, tx);
    }
    sV[comp][tx] = dot;
  }
}

////////////////////////////////////////////////////////////////////////////////
// grad, no-transpose, 1D: each ty slice of the block handles one element
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_gradn_1d_kernel(const CeedScalar *dTinterp, const CeedScalar *dTgrad, const CeedScalar *dU, const int estrdU, const int cstrdU,
                               const int dstrdU, CeedScalar *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  // thread slices mapped past the last element have nothing to do
  if (elem_id >= nelem) return;

  // carve up dynamic shared memory: grad matrix first, then per-ty U/V workspace
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_P * BASIS_Q;

  // per-component views into this ty slice's workspace:
  // BASIS_NUM_COMP rows of 1xP (input) followed by BASIS_NUM_COMP rows of 1xQ (output)
  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];
  CeedScalar *base = sW + ty * BASIS_NUM_COMP * (BASIS_P + BASIS_Q);
  for (int comp = 0; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = base + comp * BASIS_P;
    sV[comp] = base + BASIS_NUM_COMP * BASIS_P + comp * BASIS_Q;
  }

  // point the global pointers at this element
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // stage the grad matrix (once per block) and this element's input
  if (ty == 0) {
    read_T_notrans_gm2sm<BASIS_P, BASIS_Q>(tx, dTgrad, sT);
  }
  read_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_grad_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_P, BASIS_Q>(sT, sU, sV, tx);
  __syncthreads();

  // write this element's output back to global memory
  write_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}

////////////////////////////////////////////////////////////////////////////////
// grad, transpose, 1D: each ty slice of the block handles one element
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_gradt_1d_kernel(const CeedScalar *dTinterp, const CeedScalar *dTgrad, const CeedScalar *dU, const int estrdU, const int cstrdU,
                               const int dstrdU, CeedScalar *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  // thread slices mapped past the last element have nothing to do
  if (elem_id >= nelem) return;

  // carve up dynamic shared memory: transposed grad matrix first, then per-ty U/V workspace
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_Q * BASIS_P;

  // per-component views into this ty slice's workspace:
  // BASIS_NUM_COMP rows of 1xQ (input) followed by BASIS_NUM_COMP rows of 1xP (output)
  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];
  CeedScalar *base = sW + ty * BASIS_NUM_COMP * (BASIS_Q + BASIS_P);
  for (int comp = 0; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = base + comp * BASIS_Q;
    sV[comp] = base + BASIS_NUM_COMP * BASIS_Q + comp * BASIS_P;
  }

  // point the global pointers at this element
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // stage the transposed grad matrix (once per block) and this element's input
  if (ty == 0) {
    read_T_trans_gm2sm<BASIS_Q, BASIS_P>(tx, dTgrad, sT);
  }
  read_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_grad_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_Q, BASIS_P>(sT, sU, sV, tx);
  __syncthreads();

  // write this element's output back to global memory
  write_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}

////////////////////////////////////////////////////////////////////////////////
// grad, transpose-add, 1D: like magma_gradt_1d_kernel but sums into dV
extern "C" __launch_bounds__(MAGMA_BASIS_BOUNDS(BASIS_MAX_P_Q, MAGMA_MAXTHREADS_1D)) __global__
    void magma_gradta_1d_kernel(const CeedScalar *dTinterp, const CeedScalar *dTgrad, const CeedScalar *dU, const int estrdU, const int cstrdU,
                                const int dstrdU, CeedScalar *dV, const int estrdV, const int cstrdV, const int dstrdV, const int nelem) {
  MAGMA_DEVICE_SHARED(CeedScalar, shared_data)

  const int tx      = threadIdx.x;
  const int ty      = threadIdx.y;
  const int elem_id = (blockIdx.x * blockDim.y) + ty;

  // thread slices mapped past the last element have nothing to do
  if (elem_id >= nelem) return;

  // carve up dynamic shared memory: transposed grad matrix first, then per-ty U/V workspace
  CeedScalar *sT = (CeedScalar *)shared_data;
  CeedScalar *sW = sT + BASIS_Q * BASIS_P;

  // per-component views into this ty slice's workspace:
  // BASIS_NUM_COMP rows of 1xQ (input) followed by BASIS_NUM_COMP rows of 1xP (output)
  CeedScalar *sU[BASIS_NUM_COMP];
  CeedScalar *sV[BASIS_NUM_COMP];
  CeedScalar *base = sW + ty * BASIS_NUM_COMP * (BASIS_Q + BASIS_P);
  for (int comp = 0; comp < BASIS_NUM_COMP; comp++) {
    sU[comp] = base + comp * BASIS_Q;
    sV[comp] = base + BASIS_NUM_COMP * BASIS_Q + comp * BASIS_P;
  }

  // point the global pointers at this element
  dU += elem_id * estrdU;
  dV += elem_id * estrdV;

  // stage the transposed grad matrix (once per block) and this element's input
  if (ty == 0) {
    read_T_trans_gm2sm<BASIS_Q, BASIS_P>(tx, dTgrad, sT);
  }
  read_1d<CeedScalar, BASIS_Q, BASIS_NUM_COMP>(dU, cstrdU, sU, tx);

  __syncthreads();
  magma_grad_1d_device<CeedScalar, BASIS_DIM, BASIS_NUM_COMP, BASIS_Q, BASIS_P>(sT, sU, sV, tx);
  __syncthreads();

  // accumulate this element's output into global memory (transpose-add)
  sum_1d<CeedScalar, BASIS_P, BASIS_NUM_COMP>(sV, dV, cstrdV, tx);
}
