xref: /libCEED/include/ceed/jit-source/hip/hip-ref-operator-assemble.h (revision 5cd6c1fb67d52eb6a42b887bb79c183682dd86ca)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for HIP operator full assembly
10 #ifndef CEED_HIP_REF_OPERATOR_ASSEMBLE_H
11 #define CEED_HIP_REF_OPERATOR_ASSEMBLE_H
12 
13 #include <ceed.h>
14 
15 #if CEEDSIZE
16 typedef CeedSize IndexType;
17 #else
18 typedef CeedInt IndexType;
19 #endif
20 
21 //------------------------------------------------------------------------------
22 // Matrix assembly kernel for low-order elements (2D thread block)
23 //------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
    void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                        CeedScalar *__restrict__ values_array) {
  // Assumes B_in and B_out share the same number of quadrature points and basis nodes.
  // TODO: expand to more general cases
  const int row = threadIdx.x;  // Output row index of each B^T D B product
  const int col = threadIdx.y;  // Output column index of each B^T D B product,
                                // i.e. (Bout^T)_ij D_jk Bin_kl = C_il

  // Output strides, slowest --> fastest: element, comp_in, comp_out, node_row, node_col.
  // This ordering is fixed by the reference (interface) implementation of the symbolic assembly.
  const IndexType out_comp_out_stride = NNODES * NNODES;
  const IndexType out_comp_in_stride  = out_comp_out_stride * NCOMP;
  const IndexType out_elem_stride     = out_comp_in_stride * NCOMP;
  // QF array strides, slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const IndexType q_elem_stride      = NQPTS;
  const IndexType q_comp_out_stride  = NELEM * q_elem_stride;
  const IndexType q_emode_out_stride = q_comp_out_stride * NCOMP;
  const IndexType q_comp_in_stride   = q_emode_out_stride * NUMEMODEOUT;
  const IndexType q_emode_in_stride  = q_comp_in_stride * NCOMP;

  // Stride through elements so any grid size covers all NELEM elements
  for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM; e += gridDim.x * blockDim.z) {
    for (IndexType c_in = 0; c_in < NCOMP; c_in++) {
      for (IndexType c_out = 0; c_out < NCOMP; c_out++) {
        const IndexType qf_base = q_comp_in_stride * c_in + q_comp_out_stride * c_out + q_elem_stride * e;
        CeedScalar      sum     = 0.0;
        for (IndexType em_in = 0; em_in < NUMEMODEIN; em_in++) {
          const IndexType b_in_base = em_in * NQPTS * NNODES;
          for (IndexType em_out = 0; em_out < NUMEMODEOUT; em_out++) {
            const IndexType b_out_base = em_out * NQPTS * NNODES;
            const IndexType qf_offset  = qf_base + q_emode_out_stride * em_out + q_emode_in_stride * em_in;
            // Accumulate the B^T D B contribution of this 'chunk' of D (the qf_array)
            for (IndexType q = 0; q < NQPTS; q++) {
              sum += B_out[b_out_base + q * NNODES + row] * qf_array[qf_offset + q] * B_in[b_in_base + q * NNODES + col];
            }
          }  // end of emode_out
        }    // end of emode_in
        const IndexType out_index = out_comp_in_stride * c_in + out_comp_out_stride * c_out + out_elem_stride * e + NNODES * row + col;
        values_array[out_index]   = sum;
      }  // end of out component
    }    // end of in component
  }      // end of element loop
}
68 
69 //------------------------------------------------------------------------------
70 // Fallback kernel for larger orders (1D thread block)
71 //------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
    void linearAssembleFallback(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                                CeedScalar *__restrict__ values_array) {
  // 1D-thread-block fallback for orders too large for the 2D kernel: each thread owns one
  // output column l and loops over the output rows i serially.
  // This kernel assumes B_in and B_out have the same number of quadrature points and basis points.
  // TODO: expand to more general cases
  const int l = threadIdx.x;  // The output column index of each B^TDB operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: element,
  // comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NNODES * NNODES;
  const IndexType comp_in_stride  = comp_out_stride * NCOMP;
  const IndexType e_stride        = comp_in_stride * NCOMP;
  // Strides for QF array, slowest --> fastest:  emode_in, comp_in, emode_out, comp_out, elem, qpt
  const IndexType qe_stride         = NQPTS;
  const IndexType qcomp_out_stride  = NELEM * qe_stride;
  const IndexType qemode_out_stride = qcomp_out_stride * NCOMP;
  const IndexType qcomp_in_stride   = qemode_out_stride * NUMEMODEOUT;
  const IndexType qemode_in_stride  = qcomp_in_stride * NCOMP;

  // Loop over each element (if necessary)
  for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM; e += gridDim.x * blockDim.z) {
    for (IndexType comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NCOMP; comp_out++) {
        // Hoisted out of the node loop: depends only on (e, comp_in, comp_out), not on i
        // (matches the 2D kernel, which also computes this once per component pair)
        const IndexType qf_index_comp = qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
        for (IndexType i = 0; i < NNODES; i++) {
          CeedScalar result = 0.0;
          for (IndexType emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
            IndexType b_in_index = emode_in * NQPTS * NNODES;
            for (IndexType emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
              IndexType b_out_index = emode_out * NQPTS * NNODES;
              IndexType qf_index    = qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
              // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
              for (IndexType j = 0; j < NQPTS; j++) {
                result += B_out[b_out_index + j * NNODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NNODES + l];
              }
            }  // end of emode_out
          }    // end of emode_in
          IndexType val_index     = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NNODES * i + l;
          values_array[val_index] = result;
        }  // end of loop over element node index, i
      }    // end of out component
    }      // end of in component
  }        // end of element loop
}
117 
118 //------------------------------------------------------------------------------
119 
120 #endif  // CEED_HIP_REF_OPERATOR_ASSEMBLE_H
121