// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA operator full assembly
#ifndef CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
#define CEED_CUDA_REF_OPERATOR_ASSEMBLE_H

#include <ceed.h>

// Index type for all offset arithmetic below.
// USE_CEEDSIZE is injected at JIT time: when set, 64-bit CeedSize offsets are
// used (large assembled data); otherwise the narrower CeedInt suffices.
#if USE_CEEDSIZE
typedef CeedSize IndexType;
#else
typedef CeedInt IndexType;
#endif

//------------------------------------------------------------------------------
// Matrix assembly kernel for low-order elements (2D thread block)
//
// Computes the dense per-element blocks C = B_out^T D B_in of the fully
// assembled operator, where D (qf_array) holds the assembly QFunction data.
// Thread layout: threadIdx.x = output row i, threadIdx.y = output column l,
// with blockIdx.x/threadIdx.z striding over elements.
// Assumes B_in and B_out share the same NUM_QPTS and NUM_NODES.
// TODO: expand to more general cases
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
    void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                        CeedScalar *__restrict__ values_array) {
  const int i = threadIdx.x;  // Row of each B^T D B product
  const int l = threadIdx.y;  // Column of each B^T D B product
  // such that (B_out^T)_ij D_jk (B_in)_kl = C_il

  // Strides for the final output ordering, fixed by the reference (interface)
  // implementation of the symbolic assembly,
  // slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NUM_NODES * NUM_NODES;
  const IndexType comp_in_stride  = comp_out_stride * NUM_COMP;
  const IndexType e_stride        = comp_in_stride * NUM_COMP;
  // Strides for the QF data array,
  // slowest --> fastest: e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
  const IndexType q_e_stride          = NUM_QPTS;
  const IndexType q_comp_out_stride   = NUM_ELEM * q_e_stride;
  const IndexType q_e_mode_out_stride = q_comp_out_stride * NUM_COMP;
  const IndexType q_comp_in_stride    = q_e_mode_out_stride * NUM_E_MODE_OUT;
  const IndexType q_e_mode_in_stride  = q_comp_in_stride * NUM_COMP;

  // Extent of one e-mode slice of a basis matrix (B is e_mode-major)
  const IndexType b_slice = (IndexType)NUM_QPTS * NUM_NODES;

  // Grid-stride loop over elements (covers NUM_ELEM regardless of grid size)
  for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < NUM_ELEM; e += gridDim.x * blockDim.z) {
    for (IndexType comp_in = 0; comp_in < NUM_COMP; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NUM_COMP; comp_out++) {
        const IndexType qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;
        CeedScalar      result        = 0.0;

        for (IndexType e_mode_in = 0; e_mode_in < NUM_E_MODE_IN; e_mode_in++) {
          const IndexType b_in_index = e_mode_in * b_slice;

          for (IndexType e_mode_out = 0; e_mode_out < NUM_E_MODE_OUT; e_mode_out++) {
            const IndexType b_out_index = e_mode_out * b_slice;
            const IndexType qf_index    = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in;

            // Accumulate the B^T D B contribution of this chunk of D (the qf_array)
            for (IndexType j = 0; j < NUM_QPTS; j++) {
              result += B_out[b_out_index + j * NUM_NODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES + l];
            }
          }  // end of e_mode_out
        }  // end of e_mode_in
        values_array[comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NUM_NODES * i + l] = result;
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
void linearAssembleFallback(const CeedScalar *B_in, const CeedScalar *B_out, const CeedScalar *__restrict__ qf_array,
                            CeedScalar *__restrict__ values_array) {
  // Same B_out^T D B_in product as linearAssemble, but with a 1D thread block:
  // each thread owns one output column l and serially loops over output rows i,
  // for element sizes where a NUM_NODES x NUM_NODES thread block is too large.
  // Assumes B_in and B_out share the same NUM_QPTS and NUM_NODES.
  // TODO: expand to more general cases
  const int l = threadIdx.x;  // Column of each B^T D B product
  // such that (B_out^T)_ij D_jk (B_in)_kl = C_il

  // Strides for the final output ordering, fixed by the reference (interface)
  // implementation of the symbolic assembly,
  // slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NUM_NODES * NUM_NODES;
  const IndexType comp_in_stride  = comp_out_stride * NUM_COMP;
  const IndexType e_stride        = comp_in_stride * NUM_COMP;
  // Strides for the QF data array,
  // slowest --> fastest: e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
  const IndexType q_e_stride          = NUM_QPTS;
  const IndexType q_comp_out_stride   = NUM_ELEM * q_e_stride;
  const IndexType q_e_mode_out_stride = q_comp_out_stride * NUM_COMP;
  const IndexType q_comp_in_stride    = q_e_mode_out_stride * NUM_E_MODE_OUT;
  const IndexType q_e_mode_in_stride  = q_comp_in_stride * NUM_COMP;

  // Extent of one e-mode slice of a basis matrix (B is e_mode-major)
  const IndexType b_slice = (IndexType)NUM_QPTS * NUM_NODES;

  // Grid-stride loop over elements (covers NUM_ELEM regardless of grid size)
  for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < NUM_ELEM; e += gridDim.x * blockDim.z) {
    for (IndexType comp_in = 0; comp_in < NUM_COMP; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NUM_COMP; comp_out++) {
        // Component/element base offset into the QF data; independent of the row i
        const IndexType qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;

        for (IndexType i = 0; i < NUM_NODES; i++) {
          CeedScalar result = 0.0;

          for (IndexType e_mode_in = 0; e_mode_in < NUM_E_MODE_IN; e_mode_in++) {
            const IndexType b_in_index = e_mode_in * b_slice;

            for (IndexType e_mode_out = 0; e_mode_out < NUM_E_MODE_OUT; e_mode_out++) {
              const IndexType b_out_index = e_mode_out * b_slice;
              const IndexType qf_index    = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in;

              // Accumulate the B^T D B contribution of this chunk of D (the qf_array)
              for (IndexType j = 0; j < NUM_QPTS; j++) {
                result += B_out[b_out_index + j * NUM_NODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES + l];
              }
            }  // end of e_mode_out
          }  // end of e_mode_in
          values_array[comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NUM_NODES * i + l] = result;
        }  // end of loop over element node index, i
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------

#endif  // CEED_CUDA_REF_OPERATOR_ASSEMBLE_H