xref: /libCEED/include/ceed/jit-source/cuda/cuda-ref-operator-assemble.h (revision 004e49868906b3e3ec4a252ac682c88f9414881a)
107b31e0eSJeremy L Thompson // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
207b31e0eSJeremy L Thompson // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
307b31e0eSJeremy L Thompson //
407b31e0eSJeremy L Thompson // SPDX-License-Identifier: BSD-2-Clause
507b31e0eSJeremy L Thompson //
607b31e0eSJeremy L Thompson // This file is part of CEED:  http://github.com/ceed
707b31e0eSJeremy L Thompson 
8b2165e7aSSebastian Grimberg /// @file
9b2165e7aSSebastian Grimberg /// Internal header for CUDA operator full assembly
1094b7b29bSJeremy L Thompson #ifndef CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
1194b7b29bSJeremy L Thompson #define CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
12b2165e7aSSebastian Grimberg 
13c9c2c079SJeremy L Thompson #include <ceed.h>
1407b31e0eSJeremy L Thompson 
// Index type for all offset arithmetic below: the wider CeedSize when the JIT host
// defines USE_CEEDSIZE nonzero (presumably when assembled-array offsets may exceed the
// CeedInt range -- confirm against the host-side assembly code), CeedInt otherwise.
15ca735530SJeremy L Thompson #if USE_CEEDSIZE
16f7c1b517Snbeams typedef CeedSize IndexType;
17f7c1b517Snbeams #else
18f7c1b517Snbeams typedef CeedInt IndexType;
19f7c1b517Snbeams #endif
20f7c1b517Snbeams 
2107b31e0eSJeremy L Thompson //------------------------------------------------------------------------------
22*004e4986SSebastian Grimberg // Matrix assembly kernel
2307b31e0eSJeremy L Thompson //------------------------------------------------------------------------------
// Assembles the element-local matrices of a linear operator: for each element e and each
// (comp_in, comp_out) pair this computes C = B_out^T D B_in -- with optional per-node
// orientation sign flips (orients_in/out) and tridiagonal curl-orientation transfers
// (curl_orients_in/out) applied -- and writes the NUM_NODES_OUT x NUM_NODES_IN block
// into values_array.
//
// Thread layout, as implied by the indexing below (confirm against the host launch code):
//   threadIdx.x (l)          : output column / input node index; there is NO bounds check
//                              on l, so blockDim.x is assumed to equal NUM_NODES_IN
//   threadIdx.y              : strides the output row index i in steps of BLOCK_SIZE_Y
//   threadIdx.z + blockIdx.x : grid-stride loop over elements (blockDim.z elems per block)
//
// Dynamic shared memory is only touched when curl_orients_in/out is non-NULL: s_CT holds a
// full NUM_NODES_OUT x NUM_NODES_IN tile and s_C, placed immediately after it, is indexed
// by threadIdx.y so it needs BLOCK_SIZE_Y * NUM_NODES_IN entries -- the host launch must
// size shared memory to at least (NUM_NODES_OUT + BLOCK_SIZE_Y) * NUM_NODES_IN scalars
// in that case (TODO confirm against the host code).
//
// NOTE(review): the first __syncthreads() below sits inside the strided i loop; it is
// reached uniformly by the block only if every thread executes the same trip count,
// i.e. only if NUM_NODES_OUT is a multiple of BLOCK_SIZE_Y (or the launch guarantees an
// equivalent condition) -- confirm against the host-side block-size selection.
242b730f8bSJeremy L Thompson extern "C" __launch_bounds__(BLOCK_SIZE) __global__
25*004e4986SSebastian Grimberg     void LinearAssemble(const CeedInt num_elem, const CeedScalar *B_in, const CeedScalar *B_out, const bool *orients_in,
26*004e4986SSebastian Grimberg                         const CeedInt8 *curl_orients_in, const bool *orients_out, const CeedInt8 *curl_orients_out,
27*004e4986SSebastian Grimberg                         const CeedScalar *__restrict__ qf_array, CeedScalar *__restrict__ values_array) {
  // Shared scratch (used only on the curl-orientation paths): s_CT is the staged result
  // tile, s_C a per-threadIdx.y row buffer placed directly after it
28*004e4986SSebastian Grimberg   extern __shared__ CeedScalar s_CT[];
29*004e4986SSebastian Grimberg   CeedScalar                  *s_C = s_CT + NUM_NODES_OUT * NUM_NODES_IN;
3007b31e0eSJeremy L Thompson 
3107b31e0eSJeremy L Thompson   const int l = threadIdx.x;  // The output column index of each B^T D B operation
3207b31e0eSJeremy L Thompson                               // such that we have (Bout^T)_ij D_jk Bin_kl = C_il
3307b31e0eSJeremy L Thompson 
34*004e4986SSebastian Grimberg   // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: e,
35ea61e9acSJeremy L Thompson   // comp_in, comp_out, node_row, node_col
36*004e4986SSebastian Grimberg   const IndexType comp_out_stride = NUM_NODES_OUT * NUM_NODES_IN;
37*004e4986SSebastian Grimberg   const IndexType comp_in_stride  = comp_out_stride * NUM_COMP_OUT;
38*004e4986SSebastian Grimberg   const IndexType e_stride        = comp_in_stride * NUM_COMP_IN;
39*004e4986SSebastian Grimberg 
40*004e4986SSebastian Grimberg   // Strides for QF array, slowest --> fastest: e_in, comp_in, e_out, comp_out, e, q
41ca735530SJeremy L Thompson   const IndexType q_e_stride             = NUM_QPTS;
42*004e4986SSebastian Grimberg   const IndexType q_comp_out_stride      = num_elem * q_e_stride;
43*004e4986SSebastian Grimberg   const IndexType q_eval_mode_out_stride = q_comp_out_stride * NUM_COMP_OUT;
44*004e4986SSebastian Grimberg   const IndexType q_comp_in_stride       = q_eval_mode_out_stride * NUM_EVAL_MODES_OUT;
45*004e4986SSebastian Grimberg   const IndexType q_eval_mode_in_stride  = q_comp_in_stride * NUM_COMP_IN;
4607b31e0eSJeremy L Thompson 
4707b31e0eSJeremy L Thompson   // Loop over each element (if necessary)
48*004e4986SSebastian Grimberg   for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < num_elem; e += gridDim.x * blockDim.z) {
49*004e4986SSebastian Grimberg     for (IndexType comp_in = 0; comp_in < NUM_COMP_IN; comp_in++) {
50*004e4986SSebastian Grimberg       for (IndexType comp_out = 0; comp_out < NUM_COMP_OUT; comp_out++) {
        // First pass: each (i, l) thread accumulates C_il, summed over all eval-mode
        // pairs and quadrature points
51*004e4986SSebastian Grimberg         for (IndexType i = threadIdx.y; i < NUM_NODES_OUT; i += BLOCK_SIZE_Y) {
5207b31e0eSJeremy L Thompson           CeedScalar result        = 0.0;
53ca735530SJeremy L Thompson           IndexType  qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;
54ca735530SJeremy L Thompson 
55*004e4986SSebastian Grimberg           for (IndexType e_in = 0; e_in < NUM_EVAL_MODES_IN; e_in++) {
56*004e4986SSebastian Grimberg             IndexType b_in_index = e_in * NUM_QPTS * NUM_NODES_IN;
57ca735530SJeremy L Thompson 
58*004e4986SSebastian Grimberg             for (IndexType e_out = 0; e_out < NUM_EVAL_MODES_OUT; e_out++) {
59*004e4986SSebastian Grimberg               IndexType b_out_index = e_out * NUM_QPTS * NUM_NODES_OUT;
60*004e4986SSebastian Grimberg               IndexType qf_index    = qf_index_comp + q_eval_mode_out_stride * e_out + q_eval_mode_in_stride * e_in;
61ca735530SJeremy L Thompson 
6207b31e0eSJeremy L Thompson               // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
63ca735530SJeremy L Thompson               for (IndexType j = 0; j < NUM_QPTS; j++) {
64*004e4986SSebastian Grimberg                 result += B_out[b_out_index + j * NUM_NODES_OUT + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES_IN + l];
6507b31e0eSJeremy L Thompson               }
66*004e4986SSebastian Grimberg             }  // end of out eval mode
67*004e4986SSebastian Grimberg           }    // end of in eval mode
          // A true orientation flag negates this node's contribution (input side, then
          // output side)
68*004e4986SSebastian Grimberg           if (orients_in) {
69*004e4986SSebastian Grimberg             result *= orients_in[NUM_NODES_IN * e + l] ? -1.0 : 1.0;
70*004e4986SSebastian Grimberg           }
71*004e4986SSebastian Grimberg           if (orients_out) {
72*004e4986SSebastian Grimberg             result *= orients_out[NUM_NODES_OUT * e + i] ? -1.0 : 1.0;
73*004e4986SSebastian Grimberg           }
          // Without curl orientations the entry is final -- write straight to global
          // memory; otherwise stage it in shared memory so the tridiagonal transfer(s)
          // can be applied across neighboring node indices
74*004e4986SSebastian Grimberg           if (!curl_orients_in && !curl_orients_out) {
75*004e4986SSebastian Grimberg             IndexType val_index = e_stride * e + comp_in_stride * comp_in + comp_out_stride * comp_out + NUM_NODES_IN * i + l;
76ca735530SJeremy L Thompson 
7707b31e0eSJeremy L Thompson             values_array[val_index] = result;
78*004e4986SSebastian Grimberg           } else if (curl_orients_in) {
            // Right-multiply by the input transfer T_in (forming C T_in): curl_orients_in
            // stores three entries per node, accessed here at offsets 3*l-1 (sub-),
            // 3*l+1 (main) and 3*l+3 (super-diagonal); the barrier makes the whole s_C
            // row visible before neighbors l-1 and l+1 are read
79*004e4986SSebastian Grimberg             s_C[NUM_NODES_IN * threadIdx.y + l] = result;
80*004e4986SSebastian Grimberg             __syncthreads();
81*004e4986SSebastian Grimberg             s_CT[NUM_NODES_IN * i + l] =
82*004e4986SSebastian Grimberg                 (l > 0 ? s_C[NUM_NODES_IN * threadIdx.y + l - 1] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l - 1] : 0.0) +
83*004e4986SSebastian Grimberg                 s_C[NUM_NODES_IN * threadIdx.y + l] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l + 1] +
84*004e4986SSebastian Grimberg                 (l < (NUM_NODES_IN - 1) ? s_C[NUM_NODES_IN * threadIdx.y + l + 1] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l + 3] : 0.0);
85*004e4986SSebastian Grimberg           } else {
86*004e4986SSebastian Grimberg             s_CT[NUM_NODES_IN * i + l] = result;
87*004e4986SSebastian Grimberg           }
8807b31e0eSJeremy L Thompson         }  // end of loop over element node index, i
        // Second pass (only when a transfer was staged): left-multiply by T_out^T --
        // same three-entries-per-node tridiagonal storage as the input side, applied
        // across the row index i -- then write the final values
89*004e4986SSebastian Grimberg         if (curl_orients_in || curl_orients_out) {
90*004e4986SSebastian Grimberg           // Compute and store the final T^T (B^T D B T) using the fully computed C T product in shared memory
91*004e4986SSebastian Grimberg           if (curl_orients_out) __syncthreads();
92*004e4986SSebastian Grimberg           for (IndexType i = threadIdx.y; i < NUM_NODES_OUT; i += BLOCK_SIZE_Y) {
93*004e4986SSebastian Grimberg             IndexType val_index = e_stride * e + comp_in_stride * comp_in + comp_out_stride * comp_out + NUM_NODES_IN * i + l;
94*004e4986SSebastian Grimberg 
95*004e4986SSebastian Grimberg             if (curl_orients_out) {
96*004e4986SSebastian Grimberg               values_array[val_index] =
97*004e4986SSebastian Grimberg                   (i > 0 ? s_CT[NUM_NODES_IN * (i - 1) + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i - 1] : 0.0) +
98*004e4986SSebastian Grimberg                   s_CT[NUM_NODES_IN * i + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i + 1] +
99*004e4986SSebastian Grimberg                   (i < (NUM_NODES_OUT - 1) ? s_CT[NUM_NODES_IN * (i + 1) + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i + 3] : 0.0);
100*004e4986SSebastian Grimberg             } else {
101*004e4986SSebastian Grimberg               values_array[val_index] = s_CT[NUM_NODES_IN * i + l];
102*004e4986SSebastian Grimberg             }
103*004e4986SSebastian Grimberg           }
104*004e4986SSebastian Grimberg         }
10507b31e0eSJeremy L Thompson       }  // end of out component
10607b31e0eSJeremy L Thompson     }    // end of in component
10707b31e0eSJeremy L Thompson   }      // end of element loop
10807b31e0eSJeremy L Thompson }
10907b31e0eSJeremy L Thompson 
11007b31e0eSJeremy L Thompson //------------------------------------------------------------------------------
111b2165e7aSSebastian Grimberg 
11294b7b29bSJeremy L Thompson #endif  // CEED_CUDA_REF_OPERATOR_ASSEMBLE_H
113