// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed/ceed.h>

//------------------------------------------------------------------------------
// Matrix assembly kernel for low-order elements (2D thread block)
//
// Computes the dense element matrices C = B_out^T D B_in for every element,
// where D (qf_array) holds the assembled QFunction values at quadrature points.
//
// Launch contract (set by the host wrapper):
//   threadIdx.x = output row index i    (blockDim.x == NNODES)
//   threadIdx.y = output column index l (blockDim.y == NNODES)
//   blockIdx.x * blockDim.z + threadIdx.z strides over elements
//
// This kernel assumes B_in and B_out have the same number of quadrature points
// and basis points.
// TODO: expand to more general cases
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssemble(const CeedScalar *__restrict__ B_in,
                               const CeedScalar *__restrict__ B_out,
                               const CeedScalar *__restrict__ qf_array,
                               CeedScalar *__restrict__ values_array) {
  const int i = threadIdx.x;  // The output row index of each B^TDB operation
  const int l = threadIdx.y;  // The output column index of each B^TDB operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface)
  // implementation of the symbolic assembly,
  // slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride  = comp_out_stride * NCOMP;
  const CeedInt e_stride        = comp_in_stride * NCOMP;
  // Strides for QF array,
  // slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride         = NQPTS;
  const CeedInt qcomp_out_stride  = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride   = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride  = qcomp_in_stride * NCOMP;

  // Grid-stride loop over each element (if necessary)
  for (CeedInt e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x * blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      // Invariant w.r.t. comp_out; hoisted out of the component-out loop
      const CeedInt qf_index_in = qcomp_in_stride * comp_in + qe_stride * e;
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        CeedScalar result = 0.0;
        const CeedInt qf_index_comp = qf_index_in + qcomp_out_stride * comp_out;
        for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
          const CeedInt b_in_index = emode_in * NQPTS * NNODES;
          for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
            const CeedInt b_out_index = emode_out * NQPTS * NNODES;
            const CeedInt qf_index    = qf_index_comp + qemode_out_stride * emode_out +
                                        qemode_in_stride * emode_in;
            // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
            for (CeedInt j = 0; j < NQPTS; j++) {
              result += B_out[b_out_index + j * NNODES + i] * qf_array[qf_index + j] *
                        B_in[b_in_index + j * NNODES + l];
            }
          }  // end of emode_out
        }  // end of emode_in
        const CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out +
                                  e_stride * e + NNODES * i + l;
        values_array[val_index] = result;
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//
// Same computation as linearAssemble, but only the output column index comes
// from the thread block; the row index is a serial loop so that larger NNODES
// still fit within the block-size limit.
//
// Launch contract (set by the host wrapper):
//   threadIdx.x = output column index l (blockDim.x == NNODES)
//   blockIdx.x * blockDim.z + threadIdx.z strides over elements
//
// This kernel assumes B_in and B_out have the same number of quadrature points
// and basis points.
// TODO: expand to more general cases
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssembleFallback(const CeedScalar *__restrict__ B_in,
                                       const CeedScalar *__restrict__ B_out,
                                       const CeedScalar *__restrict__ qf_array,
                                       CeedScalar *__restrict__ values_array) {
  const int l = threadIdx.x;  // The output column index of each B^TDB operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface)
  // implementation of the symbolic assembly,
  // slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride  = comp_out_stride * NCOMP;
  const CeedInt e_stride        = comp_in_stride * NCOMP;
  // Strides for QF array,
  // slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride         = NQPTS;
  const CeedInt qcomp_out_stride  = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride   = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride  = qcomp_in_stride * NCOMP;

  // Grid-stride loop over each element (if necessary)
  for (CeedInt e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x * blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        // Invariant w.r.t. the node-row loop below; hoisted out of it
        const CeedInt qf_index_comp = qcomp_in_stride * comp_in +
                                      qcomp_out_stride * comp_out + qe_stride * e;
        for (CeedInt i = 0; i < NNODES; i++) {
          CeedScalar result = 0.0;
          for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
            const CeedInt b_in_index = emode_in * NQPTS * NNODES;
            for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
              const CeedInt b_out_index = emode_out * NQPTS * NNODES;
              const CeedInt qf_index    = qf_index_comp + qemode_out_stride * emode_out +
                                          qemode_in_stride * emode_in;
              // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
              for (CeedInt j = 0; j < NQPTS; j++) {
                result += B_out[b_out_index + j * NNODES + i] * qf_array[qf_index + j] *
                          B_in[b_in_index + j * NNODES + l];
              }
            }  // end of emode_out
          }  // end of emode_in
          const CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out +
                                    e_stride * e + NNODES * i + l;
          values_array[val_index] = result;
        }  // end of loop over element node index, i
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------