// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed/ceed.h>

//------------------------------------------------------------------------------
// Matrix assembly kernel for low-order elements (2D thread block)
//
// Thread layout: threadIdx.x = output row of the B^T D B product,
// threadIdx.y = output column, and threadIdx.z together with blockIdx.x
// strides over elements.
//
// Assumes B_in and B_out use the same number of quadrature points and
// basis (node) points.
// TODO: expand to more general cases
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out,
                               const CeedScalar *__restrict__ qf_array,
                               CeedScalar *__restrict__ values_array) {
  const int row = threadIdx.x;  // output row index of each B^T D B operation
  const int col = threadIdx.y;  // output column index, i.e. (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for the final output ordering, fixed by the reference (interface)
  // implementation of the symbolic assembly.
  // Slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride  = comp_out_stride * NCOMP;
  const CeedInt e_stride        = comp_in_stride * NCOMP;
  // Strides for the QF array.
  // Slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride         = NQPTS;
  const CeedInt qcomp_out_stride  = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride   = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride  = qcomp_in_stride * NCOMP;

  // Stride over elements (a single pass when the grid already covers NELEM)
  for (CeedInt e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x * blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        const CeedInt qf_index_comp =
            qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
        CeedScalar result = 0.0;
        // Accumulate the contribution of every (emode_in, emode_out) pair
        // into a single output entry
        for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
          const CeedInt b_in_index = emode_in * NQPTS * NNODES;
          for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
            const CeedInt b_out_index = emode_out * NQPTS * NNODES;
            const CeedInt qf_index =
                qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
            // B^T D B contraction over quadrature points for this 'chunk'
            // of D (the qf_array)
            for (CeedInt j = 0; j < NQPTS; j++) {
              result += B_out[b_out_index + j * NNODES + row] *
                        qf_array[qf_index + j] *
                        B_in[b_in_index + j * NNODES + col];
            }
          }  // end of emode_out
        }  // end of emode_in
        const CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out +
                                  e_stride * e + NNODES * row + col;
        values_array[val_index] = result;
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//
// Same computation as linearAssemble, but only the output column is mapped to
// a thread (threadIdx.x); each thread loops over all output rows serially.
//
// Assumes B_in and B_out use the same number of quadrature points and
// basis (node) points.
// TODO: expand to more general cases
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssembleFallback(const CeedScalar *B_in, const CeedScalar *B_out,
                                       const CeedScalar *__restrict__ qf_array,
                                       CeedScalar *__restrict__ values_array) {
  const int col = threadIdx.x;  // output column index, i.e. (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for the final output ordering, fixed by the reference (interface)
  // implementation of the symbolic assembly.
  // Slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride  = comp_out_stride * NCOMP;
  const CeedInt e_stride        = comp_in_stride * NCOMP;
  // Strides for the QF array.
  // Slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride         = NQPTS;
  const CeedInt qcomp_out_stride  = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride   = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride  = qcomp_in_stride * NCOMP;

  // Stride over elements (a single pass when the grid already covers NELEM)
  for (CeedInt e = blockIdx.x * blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x * blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        // Serial loop over output rows replaces the threadIdx.y dimension
        // of the 2D kernel
        for (CeedInt row = 0; row < NNODES; row++) {
          const CeedInt qf_index_comp =
              qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
          CeedScalar result = 0.0;
          // Accumulate the contribution of every (emode_in, emode_out) pair
          // into a single output entry
          for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
            const CeedInt b_in_index = emode_in * NQPTS * NNODES;
            for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
              const CeedInt b_out_index = emode_out * NQPTS * NNODES;
              const CeedInt qf_index =
                  qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
              // B^T D B contraction over quadrature points for this 'chunk'
              // of D (the qf_array)
              for (CeedInt j = 0; j < NQPTS; j++) {
                result += B_out[b_out_index + j * NNODES + row] *
                          qf_array[qf_index + j] *
                          B_in[b_in_index + j * NNODES + col];
              }
            }  // end of emode_out
          }  // end of emode_in
          const CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out +
                                    e_stride * e + NNODES * row + col;
          values_array[val_index] = result;
        }  // end of loop over element node index, row
      }  // end of out component
    }  // end of in component
  }  // end of element loop
}

//------------------------------------------------------------------------------