xref: /libCEED/backends/cuda-gen/ceed-cuda-gen-operator-build.cpp (revision c70a677ffe38ebccc3bd6f00f9d560759c662f96)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #define CEED_DEBUG_COLOR 12
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-source/cuda/cuda-types.h>
13 #include <ceed/jit-tools.h>
14 #include <cuda_runtime.h>
15 
16 #include <iostream>
17 #include <sstream>
18 
19 #include "../cuda-ref/ceed-cuda-ref.h"
20 #include "../cuda-shared/ceed-cuda-shared.h"
21 #include "../cuda/ceed-cuda-common.h"
22 #include "../cuda/ceed-cuda-compile.h"
23 #include "ceed-cuda-gen.h"
24 
25 //------------------------------------------------------------------------------
26 // Build single operator kernel
27 //------------------------------------------------------------------------------
28 extern "C" int CeedCudaGenOperatorBuild(CeedOperator op) {
29   using std::ostringstream;
30   using std::string;
31   bool is_setup_done;
32   CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
33   if (is_setup_done) return CEED_ERROR_SUCCESS;
34   Ceed ceed;
35   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
36   CeedOperator_Cuda_gen *data;
37   CeedCallBackend(CeedOperatorGetData(op, &data));
38   CeedQFunction           qf;
39   CeedQFunction_Cuda_gen *qf_data;
40   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
41   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
42   CeedSize lsize;
43   CeedInt  Q, P_1d = 0, Q_1d = 0, elem_size, num_input_fields, num_output_fields, num_comp, dim = 1;
44   CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
45   Q_1d = Q;
46   CeedOperatorField *op_input_fields, *op_output_fields;
47   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
48   CeedQFunctionField *qf_input_fields, *qf_output_fields;
49   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
50   CeedEvalMode              eval_mode;
51   CeedBasis                 basis;
52   CeedBasis_Cuda_shared    *basis_data;
53   CeedElemRestriction       Erestrict;
54   CeedElemRestriction_Cuda *restr_data;
55 
56   // TODO: put in a function?
57   // Check for restriction only identity operator
58   bool is_identity_qf;
59   CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
60   if (is_identity_qf) {
61     CeedEvalMode eval_mode_in, eval_mode_out;
62     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
63     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
64     CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
65               "Backend does not implement restriction only identity operators");
66   }
67 
68   ostringstream code;
69 
70   // TODO: put in a function?
71   // Add atomicAdd function for old NVidia architectures
72   struct cudaDeviceProp prop;
73   Ceed_Cuda            *ceed_data;
74   CeedCallBackend(CeedGetData(ceed, &ceed_data));
75   CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
76   if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
77     char *atomic_add_path, *atomic_add_source;
78     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-atomic-add-fallback.h", &atomic_add_path));
79     CeedDebug256(ceed, 2, "----- Loading Atomic Add Source -----\n");
80     CeedCallBackend(CeedLoadSourceToBuffer(ceed, atomic_add_path, &atomic_add_source));
81     code << atomic_add_source;
82     CeedCallBackend(CeedFree(&atomic_add_path));
83     CeedCallBackend(CeedFree(&atomic_add_source));
84   }
85 
86   // Load basis source files
87   // TODO: generalize to accept different device functions?
88   {
89     char *tensor_basis_kernel_path, *tensor_basis_kernel_source;
90     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h", &tensor_basis_kernel_path));
91     CeedDebug256(ceed, 2, "----- Loading Tensor Basis Kernel Source -----\n");
92     CeedCallBackend(CeedLoadSourceToBuffer(ceed, tensor_basis_kernel_path, &tensor_basis_kernel_source));
93     code << tensor_basis_kernel_source;
94     CeedCallBackend(CeedFree(&tensor_basis_kernel_path));
95     CeedCallBackend(CeedFree(&tensor_basis_kernel_source));
96   }
97   {
98     char *cuda_gen_template_path, *cuda_gen_template_source;
99     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-gen-templates.h", &cuda_gen_template_path));
100     CeedDebug256(ceed, 2, "----- Loading Cuda-Gen Template Source -----\n");
101     CeedCallBackend(CeedLoadSourceToBuffer(ceed, cuda_gen_template_path, &cuda_gen_template_source));
102     code << cuda_gen_template_source;
103     CeedCallBackend(CeedFree(&cuda_gen_template_path));
104     CeedCallBackend(CeedFree(&cuda_gen_template_source));
105   }
106 
107   // Get QFunction source and name
108   string q_function_source(qf_data->q_function_source);
109   string q_function_name(qf_data->q_function_name);
110   string operator_name;
111   operator_name = "CeedKernelCudaGenOperator_" + q_function_name;
112 
113   // Find dim, P_1d, Q_1d
114   data->max_P_1d = 0;
115   for (CeedInt i = 0; i < num_input_fields; i++) {
116     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
117     if (basis != CEED_BASIS_COLLOCATED) {
118       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
119       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
120 
121       // Collect dim, P_1d, and Q_1d
122       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
123       bool isTensor;
124       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
125       CeedCheck(isTensor, ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
126       CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
127       CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
128       if (P_1d > data->max_P_1d) data->max_P_1d = P_1d;
129     }
130   }
131   // Check output bases for Q_1d, dim as well
132   //   The only input basis might be CEED_BASIS_COLLOCATED
133   for (CeedInt i = 0; i < num_output_fields; i++) {
134     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
135 
136     if (basis != CEED_BASIS_COLLOCATED) {
137       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
138       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
139 
140       // Collect Q_1d
141       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
142       bool isTensor;
143       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
144       CeedCheck(isTensor, ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
145       CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
146     }
147   }
148   data->dim  = dim;
149   data->Q_1d = Q_1d;
150 
151   // Only use 3D collocated gradient parallelization strategy when gradient is computed
152   // TODO: put in a function?
153   bool use_collograd_parallelization = false;
154   if (dim == 3) {
155     bool was_grad_found = false;
156     for (CeedInt i = 0; i < num_input_fields; i++) {
157       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
158       if (eval_mode == CEED_EVAL_GRAD) {
159         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
160         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
161         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
162         was_grad_found                = true;
163       }
164     }
165     for (CeedInt i = 0; i < num_output_fields; i++) {
166       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
167       if (eval_mode == CEED_EVAL_GRAD) {
168         CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
169         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
170         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
171         was_grad_found                = true;
172       }
173     }
174   }
175 
176   // Define CEED_Q_VLA
177   code << "\n#undef CEED_Q_VLA\n";
178   if (dim != 3 || use_collograd_parallelization) {
179     code << "#define CEED_Q_VLA 1\n\n";
180   } else {
181     code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
182   }
183 
184   code << q_function_source;
185 
186   // Setup
187   code << "\n// -----------------------------------------------------------------------------\n";
188   code << "\nextern \"C\" __global__ void " << operator_name
189        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar* W) {\n";
190   for (CeedInt i = 0; i < num_input_fields; i++) {
191     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
192     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
193       code << "  const CeedScalar* d_u_" << i << " = fields.inputs[" << i << "];\n";
194     }
195   }
196 
197   for (CeedInt i = 0; i < num_output_fields; i++) {
198     code << "  CeedScalar* d_v_" << i << " = fields.outputs[" << i << "];\n";
199   }
200 
201   code << "  const CeedInt dim = " << dim << ";\n";
202   code << "  const CeedInt Q_1d = " << Q_1d << ";\n";
203 
204   code << "  extern __shared__ CeedScalar slice[];\n";
205   // TODO put in a function? InitSharedData_Cuda?
206   code << "  SharedData_Cuda data;\n";
207   code << "  data.t_id_x = threadIdx.x;\n";
208   code << "  data.t_id_y = threadIdx.y;\n";
209   code << "  data.t_id_z = threadIdx.z;\n";
210   code << "  data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
211   code << "  data.slice = slice+data.t_id_z*T_1D" << (dim > 1 ? "*T_1D" : "") << ";\n";
212 
213   code << "\n  // -- Input field constants and basis data --\n";
214   // TODO: Put in a function?
215   // Initialize constants, and matrices B and G
216   for (CeedInt i = 0; i < num_input_fields; i++) {
217     code << "  // ---- Input field " << i << " ----\n";
218     // Get elem_size, eval_mode, num_comp
219     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
220     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
221     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
222     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
223 
224     // Set field constants
225     if (eval_mode != CEED_EVAL_WEIGHT) {
226       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
227       if (basis != CEED_BASIS_COLLOCATED) {
228         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
229         code << "  const CeedInt P_in_" << i << " = " << P_1d << ";\n";
230       } else {
231         code << "  const CeedInt P_in_" << i << " = " << Q_1d << ";\n";
232       }
233       code << "  const CeedInt num_comp_in_" << i << " = " << num_comp << ";\n";
234     }
235 
236     // Load basis data
237     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
238     switch (eval_mode) {
239       case CEED_EVAL_NONE:
240         break;
241       case CEED_EVAL_INTERP:
242         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
243         data->B.inputs[i] = basis_data->d_interp_1d;
244         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
245         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
246         break;
247       case CEED_EVAL_GRAD:
248         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
249         data->B.inputs[i] = basis_data->d_interp_1d;
250         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
251         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
252         if (use_collograd_parallelization) {
253           data->G.inputs[i] = basis_data->d_collo_grad_1d;
254           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * Q_1d << "];\n";
255           code << "  loadMatrix<Q_1d,Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i << ");\n";
256         } else {
257           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
258           data->G.inputs[i]   = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
259           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
260           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_in_" + std::to_string(i))) << ",Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i
261                << ");\n";
262         }
263         break;
264       case CEED_EVAL_WEIGHT:
265         break;  // No action
266       case CEED_EVAL_DIV:
267         break;  // TODO: Not implemented
268       case CEED_EVAL_CURL:
269         break;  // TODO: Not implemented
270     }
271   }
272 
273   code << "\n  // -- Output field constants and basis data --\n";
274   for (CeedInt i = 0; i < num_output_fields; i++) {
275     code << "  // ---- Output field " << i << " ----\n";
276     // Get elem_size, eval_mode, num_comp
277     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
278     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
279     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
280     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
281 
282     // Set field constants
283     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
284     if (basis != CEED_BASIS_COLLOCATED) {
285       CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
286       code << "  const CeedInt P_out_" << i << " = " << P_1d << ";\n";
287     } else {
288       code << "  const CeedInt P_out_" << i << " = " << Q_1d << ";\n";
289     }
290     code << "  const CeedInt num_comp_out_" << i << " = " << num_comp << ";\n";
291 
292     // Load basis data
293     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
294     switch (eval_mode) {
295       case CEED_EVAL_NONE:
296         break;  // No action
297       case CEED_EVAL_INTERP:
298         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
299         data->B.outputs[i] = basis_data->d_interp_1d;
300         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
301         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
302         break;
303       case CEED_EVAL_GRAD:
304         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
305         data->B.outputs[i] = basis_data->d_interp_1d;
306         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
307         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
308         if (use_collograd_parallelization) {
309           data->G.outputs[i] = basis_data->d_collo_grad_1d;
310           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * Q_1d << "];\n";
311           code << "  loadMatrix<Q_1d,Q_1d>(data, G.outputs[" << i << "], s_G_out_" << i << ");\n";
312         } else {
313           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
314           data->G.outputs[i]  = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
315           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
316           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_out_" + std::to_string(i))) << ",Q_1d>(data, G.outputs[" << i << "], s_G_out_"
317                << i << ");\n";
318         }
319         break;
320       // LCOV_EXCL_START
321       case CEED_EVAL_WEIGHT: {
322         Ceed ceed;
323         CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
324         return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
325         break;  // Should not occur
326       }
327       case CEED_EVAL_DIV:
328         break;  // TODO: Not implemented
329       case CEED_EVAL_CURL:
330         break;  // TODO: Not implemented
331                 // LCOV_EXCL_STOP
332     }
333   }
334   code << "\n  // -- Element loop --\n";
335   code << "  __syncthreads();\n";
336   code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
337   // Input basis apply if needed
338   // Generate the correct eval mode code for each input
339   code << "    // -- Input field restrictions and basis actions --\n";
340   for (CeedInt i = 0; i < num_input_fields; i++) {
341     code << "    // ---- Input field " << i << " ----\n";
342     // Get elem_size, eval_mode, num_comp
343     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
344     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
345     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
346     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
347 
348     // TODO: put in a function?
349     // Restriction
350     if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_collograd_parallelization)) {
351       code << "    CeedScalar r_u_" << i << "[num_comp_in_" << i << "*P_in_" << i << "];\n";
352 
353       bool is_strided;
354       CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
355       if (!is_strided) {
356         CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
357         code << "    const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
358         CeedInt comp_stride;
359         CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
360         code << "    // CompStride: " << comp_stride << "\n";
361         CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
362         data->indices.inputs[i] = restr_data->d_ind;
363         code << "    readDofsOffset" << dim << "d<num_comp_in_" << i << ", " << comp_stride << ", P_in_" << i << ">(data, lsize_in_" << i
364              << ", elem, indices.inputs[" << i << "], d_u_" << i << ", r_u_" << i << ");\n";
365       } else {
366         bool backendstrides;
367         CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
368         CeedInt num_elem;
369         CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
370         CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
371         if (!backendstrides) {
372           CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
373         }
374         code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
375         code << "    readDofsStrided" << dim << "d<num_comp_in_" << i << ",P_in_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
376              << ">(data, elem, d_u_" << i << ", r_u_" << i << ");\n";
377       }
378     }
379 
380     // TODO: put in a function?
381     // Basis action
382     code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
383     switch (eval_mode) {
384       case CEED_EVAL_NONE:
385         if (!use_collograd_parallelization) {
386           code << "    CeedScalar* r_t_" << i << " = r_u_" << i << ";\n";
387         }
388         break;
389       case CEED_EVAL_INTERP:
390         code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
391         code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_"
392              << i << ", r_t_" << i << ");\n";
393         break;
394       case CEED_EVAL_GRAD:
395         if (use_collograd_parallelization) {
396           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
397           code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i
398                << ", s_B_in_" << i << ", r_t_" << i << ");\n";
399         } else {
400           CeedInt P_1d;
401           CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
402           CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
403           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*dim*Q_1d];\n";
404           code << "    Grad" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_in_" << i
405                << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_" << i << ", s_G_in_" << i << ", r_t_" << i << ");\n";
406         }
407         break;
408       case CEED_EVAL_WEIGHT:
409         code << "    CeedScalar r_t_" << i << "[Q_1d];\n";
410         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
411         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
412         data->W = basis_data->d_q_weight_1d;
413         code << "    Weight" << (dim > 1 ? "Tensor" : "") << dim << "d<Q_1d>(data, W, r_t_" << i << ");\n";
414         break;  // No action
415       case CEED_EVAL_DIV:
416         break;  // TODO: Not implemented
417       case CEED_EVAL_CURL:
418         break;  // TODO: Not implemented
419     }
420   }
421 
422   // TODO: put in a function + separate collograd logic
423   // Q function
424   code << "\n    // -- Output field setup --\n";
425   for (CeedInt i = 0; i < num_output_fields; i++) {
426     code << "\n    // ---- Output field " << i << " ----\n";
427     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
428     if (eval_mode == CEED_EVAL_GRAD) {
429       if (use_collograd_parallelization) {
430         // Accumulator for gradient slices
431         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
432         code << "    for (CeedInt i = 0; i < num_comp_out_" << i << "; i++) {\n";
433         code << "      for (CeedInt j = 0; j < Q_1d; ++j) {\n";
434         code << "        r_tt_" << i << "[j + i*Q_1d] = 0.0;\n";
435         code << "      }\n";
436         code << "    }\n";
437       } else {
438         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*dim*Q_1d];\n";
439       }
440     }
441     if (eval_mode == CEED_EVAL_NONE || eval_mode == CEED_EVAL_INTERP) {
442       code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
443     }
444   }
445   // We treat quadrature points per slice in 3d to save registers
446   if (use_collograd_parallelization) {
447     code << "\n    // Note: Using planes of 3D elements\n";
448     code << "#pragma unroll\n";
449     code << "    for (CeedInt q = 0; q < Q_1d; q++) {\n";
450     code << "      // -- Input fields --\n";
451     for (CeedInt i = 0; i < num_input_fields; i++) {
452       code << "      // ---- Input field " << i << " ----\n";
453       // Get elem_size, eval_mode, num_comp
454       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
455       // Basis action
456       code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
457       switch (eval_mode) {
458         case CEED_EVAL_NONE:
459           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
460 
461           bool is_strided;
462           CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
463           CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
464           if (!is_strided) {
465             CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
466             code << "      const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
467             CeedInt comp_stride;
468             CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
469             code << "      // CompStride: " << comp_stride << "\n";
470             CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
471             data->indices.inputs[i] = restr_data->d_ind;
472             code << "      readSliceQuadsOffset"
473                  << "3d<num_comp_in_" << i << ", " << comp_stride << ", Q_1d>(data, lsize_in_" << i << ", elem, q, indices.inputs[" << i << "], d_u_"
474                  << i << ", r_q_" << i << ");\n";
475           } else {
476             CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
477             bool backendstrides;
478             CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
479             CeedInt num_elem;
480             CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
481             CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
482             if (!backendstrides) {
483               CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
484             }
485             code << "      // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
486             code << "      readSliceQuadsStrided"
487                  << "3d<num_comp_in_" << i
488                  << ",Q_1d"
489                     ","
490                  << strides[0] << "," << strides[1] << "," << strides[2] << ">(data, elem, q, d_u_" << i << ", r_q_" << i << ");\n";
491           }
492           break;
493         case CEED_EVAL_INTERP:
494           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
495           code << "      for (CeedInt j = 0; j < num_comp_in_" << i << " ; ++j) {\n";
496           code << "        r_q_" << i << "[j] = r_t_" << i << "[q + j*Q_1d];\n";
497           code << "      }\n";
498           break;
499         case CEED_EVAL_GRAD:
500           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "*dim];\n";
501           code << "      gradCollo3d<num_comp_in_" << i << ",Q_1d>(data, q, r_t_" << i << ", s_G_in_" << i << ", r_q_" << i << ");\n";
502           break;
503         case CEED_EVAL_WEIGHT:
504           code << "      CeedScalar r_q_" << i << "[1];\n";
505           code << "      r_q_" << i << "[0] = r_t_" << i << "[q];\n";
506           break;  // No action
507         case CEED_EVAL_DIV:
508           break;  // TODO: Not implemented
509         case CEED_EVAL_CURL:
510           break;  // TODO: Not implemented
511       }
512     }
513     code << "\n      // -- Output fields --\n";
514     for (CeedInt i = 0; i < num_output_fields; i++) {
515       code << "      // ---- Output field " << i << " ----\n";
516       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
517       // Basis action
518       switch (eval_mode) {
519         case CEED_EVAL_NONE:
520           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
521           break;  // No action
522         case CEED_EVAL_INTERP:
523           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
524           break;
525         case CEED_EVAL_GRAD:
526           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "*dim];\n";
527           break;
528         case CEED_EVAL_WEIGHT:
529           break;  // Should not occur
530         case CEED_EVAL_DIV:
531           break;  // TODO: Not implemented
532         case CEED_EVAL_CURL:
533           break;  // TODO: Not implemented
534       }
535     }
536   } else {
537     code << "\n      // Note: Using full elements\n";
538     code << "      // -- Input fields --\n";
539     for (CeedInt i = 0; i < num_input_fields; i++) {
540       code << "      // ---- Input field " << i << " ----\n";
541       code << "      CeedScalar* r_q_" << i << " = r_t_" << i << ";\n";
542     }
543     code << "      // -- Output fields --\n";
544     for (CeedInt i = 0; i < num_output_fields; i++) {
545       code << "      // ---- Output field " << i << " ----\n";
546       code << "      CeedScalar* r_qq_" << i << " = r_tt_" << i << ";\n";
547     }
548   }
549   code << "\n      // -- QFunction Inputs and outputs --\n";
550   code << "      CeedScalar* in[" << num_input_fields << "];\n";
551   for (CeedInt i = 0; i < num_input_fields; i++) {
552     code << "      // ---- Input field " << i << " ----\n";
553     code << "      in[" << i << "] = r_q_" << i << ";\n";
554   }
555   code << "      CeedScalar* out[" << num_output_fields << "];\n";
556   for (CeedInt i = 0; i < num_output_fields; i++) {
557     code << "      // ---- Output field " << i << " ----\n";
558     code << "      out[" << i << "] = r_qq_" << i << ";\n";
559   }
560   code << "\n      // -- Apply QFunction --\n";
561   code << "      " << q_function_name << "(ctx, ";
562   if (dim != 3 || use_collograd_parallelization) {
563     code << "1";
564   } else {
565     code << "Q_1d";
566   }
567   code << ", in, out);\n";
568   if (use_collograd_parallelization) {
569     code << "      // -- Output fields --\n";
570     for (CeedInt i = 0; i < num_output_fields; i++) {
571       code << "      // ---- Output field " << i << " ----\n";
572       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
573       // Basis action
574       code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
575       switch (eval_mode) {
576         case CEED_EVAL_NONE:
577           code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
578           code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
579           code << "      }\n";
580           break;  // No action
581         case CEED_EVAL_INTERP:
582           code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
583           code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
584           code << "      }\n";
585           break;
586         case CEED_EVAL_GRAD:
587           code << "      gradColloTranspose3d<num_comp_out_" << i << ",Q_1d>(data, q, r_qq_" << i << ", s_G_out_" << i << ", r_tt_" << i << ");\n";
588           break;
589         case CEED_EVAL_WEIGHT:
590           break;  // Should not occur
591         case CEED_EVAL_DIV:
592           break;  // TODO: Not implemented
593         case CEED_EVAL_CURL:
594           break;  // TODO: Not implemented
595       }
596     }
597     code << "    }\n";
598   }
599 
600   // Output basis apply if needed
601   // Generate the correct eval mode code for each output
602   code << "\n    // -- Output field basis action and restrictions --\n";
603   for (CeedInt i = 0; i < num_output_fields; i++) {
604     code << "    // ---- Output field " << i << " ----\n";
605     // Get elem_size, eval_mode, num_comp
606     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
607     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
608     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
609     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
610     // TODO put in a function
611     // Basis action
612     code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
613     switch (eval_mode) {
614       case CEED_EVAL_NONE:
615         code << "    CeedScalar* r_v_" << i << " = r_tt_" << i << ";\n";
616         break;  // No action
617       case CEED_EVAL_INTERP:
618         code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
619         code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
620              << ", s_B_out_" << i << ", r_v_" << i << ");\n";
621         break;
622       case CEED_EVAL_GRAD:
623         code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
624         if (use_collograd_parallelization) {
625           code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
626                << ", s_B_out_" << i << ", r_v_" << i << ");\n";
627         } else {
628           CeedInt P_1d;
629           CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
630           CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
631           code << "    GradTranspose" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_out_" << i
632                << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i << ", s_B_out_" << i << ", s_G_out_" << i << ", r_v_" << i << ");\n";
633         }
634         break;
635       // LCOV_EXCL_START
636       case CEED_EVAL_WEIGHT: {
637         Ceed ceed;
638         CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
639         return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
640         break;  // Should not occur
641       }
642       case CEED_EVAL_DIV:
643         break;  // TODO: Not implemented
644       case CEED_EVAL_CURL:
645         break;  // TODO: Not implemented
646                 // LCOV_EXCL_STOP
647     }
648     // TODO put in a function
649     // Restriction
650     bool is_strided;
651     CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
652     if (!is_strided) {
653       CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
654       code << "    const CeedInt lsize_out_" << i << " = " << lsize << ";\n";
655       CeedInt comp_stride;
656       CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
657       code << "    // CompStride: " << comp_stride << "\n";
658       CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
659       data->indices.outputs[i] = restr_data->d_ind;
660       code << "    writeDofsOffset" << dim << "d<num_comp_out_" << i << ", " << comp_stride << ", P_out_" << i << ">(data, lsize_out_" << i
661            << ", elem, indices.outputs[" << i << "], r_v_" << i << ", d_v_" << i << ");\n";
662     } else {
663       bool has_backend_strides;
664       CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &has_backend_strides));
665       CeedInt num_elem;
666       CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
667       CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
668       if (!has_backend_strides) {
669         CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
670       }
671       code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
672       code << "    writeDofsStrided" << dim << "d<num_comp_out_" << i << ",P_out_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
673            << ">(data, elem, r_v_" << i << ", d_v_" << i << ");\n";
674     }
675   }
676 
677   code << "  }\n";
678   code << "}\n";
679   code << "// -----------------------------------------------------------------------------\n\n";
680 
681   // View kernel for debugging
682   CeedDebug256(ceed, 2, "Generated Operator Kernels:\n");
683   CeedDebug(ceed, code.str().c_str());
684 
685   CeedCallBackend(CeedCompileCuda(ceed, code.str().c_str(), &data->module, 1, "T_1D", CeedIntMax(Q_1d, data->max_P_1d)));
686   CeedCallBackend(CeedGetKernelCuda(ceed, data->module, operator_name.c_str(), &data->op));
687 
688   CeedCallBackend(CeedOperatorSetSetupDone(op));
689   return CEED_ERROR_SUCCESS;
690 }
691 
692 //------------------------------------------------------------------------------
693