// Copyright (c) 2017-2026, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

/// @file
/// Internal header for CUDA operator full assembly
#include <ceed/types.h>
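// IndexType is the integer type used for all indexing arithmetic below. NOTE (assumption):
// USE_CEEDSIZE is expected to be defined by the calling backend when the assembled output
// could overflow CeedInt, in which case the wider CeedSize type keeps the stride
// computations (e.g. e_stride * e) safe for very large numbers of elements.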
#if USE_CEEDSIZE
typedef CeedSize IndexType;
#else
typedef CeedInt IndexType;
#endif

//------------------------------------------------------------------------------
// Matrix assembly kernel
//------------------------------------------------------------------------------
extern "C" __launch_bounds__(BLOCK_SIZE) __global__
void LinearAssemble(const CeedInt num_elem, const CeedScalar *B_in, const CeedScalar *B_out, const bool *orients_in,
                    const CeedInt8 *curl_orients_in, const bool *orients_out, const CeedInt8 *curl_orients_out,
                    const CeedScalar *__restrict__ qf_array, CeedScalar *__restrict__ values_array) {
  extern __shared__ CeedScalar s_CT[];
  CeedScalar *s_C = &s_CT[NUM_NODES_OUT * NUM_NODES_IN];
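  // Shared memory layout (only used on the curl-orientation paths below): s_CT holds one full
  // NUM_NODES_OUT x NUM_NODES_IN element block, and s_C provides BLOCK_SIZE_Y x NUM_NODES_IN
  // scratch rows, so those paths require (NUM_NODES_OUT + BLOCK_SIZE_Y) * NUM_NODES_IN scalars
  // of dynamic shared memory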

  const int l = threadIdx.x;  // The output column index of each B^T D B operation
                              // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: e,
  // comp_in, comp_out, node_row, node_col
  const IndexType comp_out_stride = NUM_NODES_OUT * NUM_NODES_IN;
  const IndexType comp_in_stride  = comp_out_stride * NUM_COMP_OUT;
  const IndexType e_stride        = comp_in_stride * NUM_COMP_IN;
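  // With these strides, entry (i, l) of the block for element e and components (comp_in, comp_out)
  // is stored at values_array[e_stride * e + comp_in_stride * comp_in + comp_out_stride * comp_out +
  // NUM_NODES_IN * i + l], matching the val_index computations below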

  // Strides for QF array, slowest --> fastest: e_in, comp_in, e_out, comp_out, e, q
  const IndexType q_e_stride             = NUM_QPTS;
  const IndexType q_comp_out_stride      = num_elem * q_e_stride;
  const IndexType q_eval_mode_out_stride = q_comp_out_stride * NUM_COMP_OUT;
  const IndexType q_comp_in_stride       = q_eval_mode_out_stride * NUM_EVAL_MODES_OUT;
  const IndexType q_eval_mode_in_stride  = q_comp_in_stride * NUM_COMP_IN;
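  // The assembled QFunction data D is addressed as qf_array[q_eval_mode_in_stride * e_in +
  // q_comp_in_stride * comp_in + q_eval_mode_out_stride * e_out + q_comp_out_stride * comp_out +
  // q_e_stride * e + j], i.e. one vector of NUM_QPTS pointwise values per (eval mode, component) pair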

  // Grid-stride loop over elements: each block processes blockDim.z elements at a time and
  // strides by gridDim.x * blockDim.z until all num_elem elements are covered
  for (IndexType e = blockIdx.x * blockDim.z + threadIdx.z; e < num_elem; e += gridDim.x * blockDim.z) {
    for (IndexType comp_in = 0; comp_in < NUM_COMP_IN; comp_in++) {
      for (IndexType comp_out = 0; comp_out < NUM_COMP_OUT; comp_out++) {
        for (IndexType i = threadIdx.y; i < NUM_NODES_OUT; i += BLOCK_SIZE_Y) {
          CeedScalar result        = 0.0;
          IndexType  qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;

          for (IndexType e_in = 0; e_in < NUM_EVAL_MODES_IN; e_in++) {
            IndexType b_in_index = e_in * NUM_QPTS * NUM_NODES_IN;

            for (IndexType e_out = 0; e_out < NUM_EVAL_MODES_OUT; e_out++) {
              IndexType b_out_index = e_out * NUM_QPTS * NUM_NODES_OUT;
              IndexType qf_index    = qf_index_comp + q_eval_mode_out_stride * e_out + q_eval_mode_in_stride * e_in;

              // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
              for (IndexType j = 0; j < NUM_QPTS; j++) {
                result += B_out[b_out_index + j * NUM_NODES_OUT + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NUM_NODES_IN + l];
              }
            }  // end of out eval mode
          }    // end of in eval mode
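          // Apply the sign flips recorded in the optional orientation arrays: a true entry
          // negates the corresponding column (input node l) or row (output node i) of the block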
          if (orients_in) {
            result *= orients_in[NUM_NODES_IN * e + l] ? -1.0 : 1.0;
          }
          if (orients_out) {
            result *= orients_out[NUM_NODES_OUT * e + i] ? -1.0 : 1.0;
          }
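          // curl_orients_in/out, when present, each encode a tridiagonal transformation matrix T
          // per element, stored as three entries (sub-, main-, super-diagonal) per node, and the
          // assembled block becomes T_out^T (B_out^T D B_in) T_in; without them, the result is final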
          if (!curl_orients_in && !curl_orients_out) {
            IndexType val_index = e_stride * e + comp_in_stride * comp_in + comp_out_stride * comp_out + NUM_NODES_IN * i + l;

            values_array[val_index] = result;
          } else if (curl_orients_in) {
            s_C[NUM_NODES_IN * threadIdx.y + l] = result;
            __syncthreads();
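            // Right-multiply by T_in: column l of C T_in combines columns l - 1, l, and l + 1 of C
            // with the tridiagonal entries of T_in (the barrier makes neighboring columns' s_C writes visible)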
            s_CT[NUM_NODES_IN * i + l] =
                (l > 0 ? s_C[NUM_NODES_IN * threadIdx.y + l - 1] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l - 1] : 0.0) +
                s_C[NUM_NODES_IN * threadIdx.y + l] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l + 1] +
                (l < (NUM_NODES_IN - 1) ? s_C[NUM_NODES_IN * threadIdx.y + l + 1] * curl_orients_in[3 * NUM_NODES_IN * e + 3 * l + 3] : 0.0);
          } else {
            s_CT[NUM_NODES_IN * i + l] = result;
          }
        }  // end of loop over element node index, i
        if (curl_orients_in || curl_orients_out) {
          // Compute and store the final T^T (B^T D B T) using the fully computed C T product in shared memory
          if (curl_orients_out) __syncthreads();
          for (IndexType i = threadIdx.y; i < NUM_NODES_OUT; i += BLOCK_SIZE_Y) {
            IndexType val_index = e_stride * e + comp_in_stride * comp_in + comp_out_stride * comp_out + NUM_NODES_IN * i + l;
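            // Left-multiply by T_out^T: row i of the final block combines rows i - 1, i, and i + 1
            // of the C T_in product in s_CT with the tridiagonal entries of T_out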
            if (curl_orients_out) {
              values_array[val_index] =
                  (i > 0 ? s_CT[NUM_NODES_IN * (i - 1) + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i - 1] : 0.0) +
                  s_CT[NUM_NODES_IN * i + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i + 1] +
                  (i < (NUM_NODES_OUT - 1) ? s_CT[NUM_NODES_IN * (i + 1) + l] * curl_orients_out[3 * NUM_NODES_OUT * e + 3 * i + 3] : 0.0);
            } else {
              values_array[val_index] = s_CT[NUM_NODES_IN * i + l];
            }
          }
        }
      }  // end of out component
    }    // end of in component
  }      // end of element loop
}
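
//------------------------------------------------------------------------------
// Illustrative launch sketch (not part of the kernel): a minimal, hypothetical host-side
// configuration consistent with the indexing above. It assumes blockDim.x == NUM_NODES_IN
// (each thread owns one column l), that BLOCK_SIZE equals the total threads per block, and
// that elems_per_block and the d_* device pointers are caller-provided; actual backends may
// launch this JIT-compiled kernel differently (e.g. via the CUDA driver API).
//
//   dim3   block(NUM_NODES_IN, BLOCK_SIZE_Y, elems_per_block);
//   dim3   grid((num_elem + elems_per_block - 1) / elems_per_block);
//   size_t shared_bytes = (NUM_NODES_OUT + BLOCK_SIZE_Y) * NUM_NODES_IN * sizeof(CeedScalar);
//   LinearAssemble<<<grid, block, shared_bytes>>>(num_elem, d_B_in, d_B_out, d_orients_in,
//                                                 d_curl_orients_in, d_orients_out,
//                                                 d_curl_orients_out, d_qf_array, d_values_array);
//------------------------------------------------------------------------------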