xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-operator-assemble.h (revision ca7355308a14805110a7d98dabf1f658d5ec16d5)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 /// @file
9 /// Internal header for CUDA operator full assembly
10 #ifndef CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
11 #define CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
12 
13 #include <ceed.h>
14 
15 #if USE_CEEDSIZE
16 typedef CeedSize IndexType;
17 #else
18 typedef CeedInt IndexType;
19 #endif
20 
21 //------------------------------------------------------------------------------
22 // Matrix assembly kernel for low-order elements (2D thread block)
23 //------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
    void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                        CeedScalar *__restrict__ values_array) {
  // Computes the element matrices C = B_out^T D B_in for every (comp_in, comp_out)
  // pair of every element, where D is the assembled QFunction data (qf_array).
  //
  // Expected launch layout:
  //   threadIdx.x = output row index i   (0 <= i < NUM_NODES)
  //   threadIdx.y = output column index l (0 <= l < NUM_NODES)
  //   threadIdx.z + blockIdx.x           : element index, grid-stride
  //
  // This kernel assumes B_in and B_out have the same number of quadrature points and basis points.
  // TODO: expand to more general cases
  const int i = threadIdx.x;  // The output row index of each B^TDB operation
  const int l = threadIdx.y;  // The output column index of each B^TDB operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: element,
  // comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NUM_NODES * NUM_NODES;
  const IndexType comp_in_stride  = comp_out_stride * NUM_COMP;
  const IndexType e_stride        = comp_in_stride * NUM_COMP;
  // Strides for QF array, slowest --> fastest:  e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
  const IndexType q_e_stride          = NUM_QPTS;
  const IndexType q_comp_out_stride   = NUM_ELEM * q_e_stride;
  const IndexType q_e_mode_out_stride = q_comp_out_stride * NUM_COMP;
  const IndexType q_comp_in_stride    = q_e_mode_out_stride * NUM_E_MODE_OUT;
  const IndexType q_e_mode_in_stride  = q_comp_in_stride * NUM_COMP;

  // Grid-stride loop over elements (if necessary). Cast the built-in launch
  // variables to IndexType BEFORE multiplying: gridDim/blockDim/blockIdx are
  // 32-bit unsigned, so the products could otherwise silently overflow when
  // IndexType is CeedSize (USE_CEEDSIZE) on very large meshes.
  const IndexType elem_stride = (IndexType)gridDim.x * blockDim.z;

  for (IndexType e = (IndexType)blockIdx.x * blockDim.z + threadIdx.z; e < NUM_ELEM; e += elem_stride) {
    for (IndexType comp_in = 0; comp_in < NUM_COMP; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NUM_COMP; comp_out++) {
        CeedScalar result = 0.0;
        // Base offset into qf_array for this (comp_in, comp_out, e) triple
        const IndexType qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;

        for (IndexType e_mode_in = 0; e_mode_in < NUM_E_MODE_IN; e_mode_in++) {
          const IndexType b_in_index = e_mode_in * NUM_QPTS * NUM_NODES;

          for (IndexType e_mode_out = 0; e_mode_out < NUM_E_MODE_OUT; e_mode_out++) {
            const IndexType b_out_index = e_mode_out * NUM_QPTS * NUM_NODES;
            const IndexType qf_index    = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in;

            // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
            for (IndexType j = 0; j < NUM_QPTS; j++) {
              result += B_out[b_out_index + j * NUM_NODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES + l];
            }
          }  // end of e_mode_out
        }    // end of e_mode_in
        const IndexType val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NUM_NODES * i + l;

        values_array[val_index] = result;
      }  // end of out component
    }    // end of in component
  }      // end of element loop
}
72 
73 //------------------------------------------------------------------------------
74 // Fallback kernel for larger orders (1D thread block)
75 //------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
    void linearAssembleFallback(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                                CeedScalar *__restrict__ values_array) {
  // Fallback for higher orders where a NUM_NODES x NUM_NODES 2D thread block
  // would exceed limits: each thread owns one output column l and loops over
  // the output rows i serially.
  //
  // Expected launch layout:
  //   threadIdx.x = output column index l (0 <= l < NUM_NODES)
  //   threadIdx.z + blockIdx.x            : element index, grid-stride
  //
  // This kernel assumes B_in and B_out have the same number of quadrature points and basis points.
  // TODO: expand to more general cases
  const int l = threadIdx.x;  // The output column index of each B^TDB operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: element,
  // comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NUM_NODES * NUM_NODES;
  const IndexType comp_in_stride  = comp_out_stride * NUM_COMP;
  const IndexType e_stride        = comp_in_stride * NUM_COMP;
  // Strides for QF array, slowest --> fastest:  e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
  const IndexType q_e_stride          = NUM_QPTS;
  const IndexType q_comp_out_stride   = NUM_ELEM * q_e_stride;
  const IndexType q_e_mode_out_stride = q_comp_out_stride * NUM_COMP;
  const IndexType q_comp_in_stride    = q_e_mode_out_stride * NUM_E_MODE_OUT;
  const IndexType q_e_mode_in_stride  = q_comp_in_stride * NUM_COMP;

  // Grid-stride loop over elements (if necessary). Cast the built-in launch
  // variables to IndexType BEFORE multiplying: gridDim/blockDim/blockIdx are
  // 32-bit unsigned, so the products could otherwise silently overflow when
  // IndexType is CeedSize (USE_CEEDSIZE) on very large meshes.
  const IndexType elem_stride = (IndexType)gridDim.x * blockDim.z;

  for (IndexType e = (IndexType)blockIdx.x * blockDim.z + threadIdx.z; e < NUM_ELEM; e += elem_stride) {
    for (IndexType comp_in = 0; comp_in < NUM_COMP; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NUM_COMP; comp_out++) {
        for (IndexType i = 0; i < NUM_NODES; i++) {
          CeedScalar result = 0.0;
          // Base offset into qf_array for this (comp_in, comp_out, e) triple
          const IndexType qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;

          for (IndexType e_mode_in = 0; e_mode_in < NUM_E_MODE_IN; e_mode_in++) {
            const IndexType b_in_index = e_mode_in * NUM_QPTS * NUM_NODES;

            for (IndexType e_mode_out = 0; e_mode_out < NUM_E_MODE_OUT; e_mode_out++) {
              const IndexType b_out_index = e_mode_out * NUM_QPTS * NUM_NODES;
              const IndexType qf_index    = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in;

              // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
              for (IndexType j = 0; j < NUM_QPTS; j++) {
                result += B_out[b_out_index + j * NUM_NODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES + l];
              }
            }  // end of e_mode_out
          }    // end of e_mode_in
          const IndexType val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NUM_NODES * i + l;

          values_array[val_index] = result;
        }  // end of loop over element node index, i
      }    // end of out component
    }      // end of in component
  }        // end of element loop
}
125 
126 //------------------------------------------------------------------------------
127 
128 #endif  // CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
129