xref: /libCEED/backends/cuda-gen/ceed-cuda-gen-operator-build.cpp (revision 5fb68f377259d3910de46d787b7c5d1587fd01e1)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #define CEED_DEBUG_COLOR 12
9 
10 #include <ceed/backend.h>
11 #include <ceed/ceed.h>
12 #include <ceed/jit-tools.h>
13 #include <cuda_runtime.h>
14 
15 #include <iostream>
16 #include <sstream>
17 
18 #include "../cuda-ref/ceed-cuda-ref.h"
19 #include "../cuda-shared/ceed-cuda-shared.h"
20 #include "../cuda/ceed-cuda-compile.h"
21 #include "ceed-cuda-gen.h"
22 
23 //------------------------------------------------------------------------------
24 // Build single operator kernel
25 //------------------------------------------------------------------------------
26 extern "C" int CeedCudaGenOperatorBuild(CeedOperator op) {
27   using std::ostringstream;
28   using std::string;
29   bool is_setup_done;
30   CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
31   if (is_setup_done) return CEED_ERROR_SUCCESS;
32   Ceed ceed;
33   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
34   CeedOperator_Cuda_gen *data;
35   CeedCallBackend(CeedOperatorGetData(op, &data));
36   CeedQFunction           qf;
37   CeedQFunction_Cuda_gen *qf_data;
38   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
39   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
40   CeedSize lsize;
41   CeedInt  Q, P_1d = 0, Q_1d = 0, elem_size, num_input_fields, num_output_fields, num_comp, dim = 1;
42   CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
43   Q_1d = Q;
44   CeedOperatorField *op_input_fields, *op_output_fields;
45   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
46   CeedQFunctionField *qf_input_fields, *qf_output_fields;
47   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
48   CeedEvalMode              eval_mode;
49   CeedBasis                 basis;
50   CeedBasis_Cuda_shared    *basis_data;
51   CeedElemRestriction       Erestrict;
52   CeedElemRestriction_Cuda *restr_data;
53 
54   // TODO: put in a function?
55   // Check for restriction only identity operator
56   bool is_identity_qf;
57   CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
58   if (is_identity_qf) {
59     CeedEvalMode eval_mode_in, eval_mode_out;
60     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
61     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
62     if (eval_mode_in == CEED_EVAL_NONE && eval_mode_out == CEED_EVAL_NONE)
63       // LCOV_EXCL_START
64       return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement restriction only identity operators");
65     // LCOV_EXCL_STOP
66   }
67 
68   ostringstream code;
69 
70   // TODO: put in a function?
71   // Add atomicAdd function for old NVidia architectures
72   struct cudaDeviceProp prop;
73   Ceed_Cuda            *ceed_data;
74   CeedCallBackend(CeedGetData(ceed, &ceed_data));
75   CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
76   if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
77     char *atomic_add_path, *atomic_add_source;
78     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-atomic-add-fallback.h", &atomic_add_path));
79     CeedDebug256(ceed, 2, "----- Loading Atomic Add Source -----\n");
80     CeedCallBackend(CeedLoadSourceToBuffer(ceed, atomic_add_path, &atomic_add_source));
81     code << atomic_add_source;
82     CeedCallBackend(CeedFree(&atomic_add_path));
83     CeedCallBackend(CeedFree(&atomic_add_source));
84   }
85 
86   // Load basis source files
87   // TODO: generalize to accept different device functions?
88   {
89     char *tensor_basis_kernel_path, *tensor_basis_kernel_source;
90     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h", &tensor_basis_kernel_path));
91     CeedDebug256(ceed, 2, "----- Loading Tensor Basis Kernel Source -----\n");
92     CeedCallBackend(CeedLoadSourceToBuffer(ceed, tensor_basis_kernel_path, &tensor_basis_kernel_source));
93     code << tensor_basis_kernel_source;
94     CeedCallBackend(CeedFree(&tensor_basis_kernel_path));
95     CeedCallBackend(CeedFree(&tensor_basis_kernel_source));
96   }
97   {
98     char *cuda_gen_template_path, *cuda_gen_template_source;
99     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-gen-templates.h", &cuda_gen_template_path));
100     CeedDebug256(ceed, 2, "----- Loading Cuda-Gen Template Source -----\n");
101     CeedCallBackend(CeedLoadSourceToBuffer(ceed, cuda_gen_template_path, &cuda_gen_template_source));
102     code << cuda_gen_template_source;
103     CeedCallBackend(CeedFree(&cuda_gen_template_path));
104     CeedCallBackend(CeedFree(&cuda_gen_template_source));
105   }
106 
107   // Get QFunction source and name
108   string q_function_source(qf_data->q_function_source);
109   string q_function_name(qf_data->q_function_name);
110   string operator_name;
111   operator_name = "CeedKernelCudaGenOperator_" + q_function_name;
112 
113   // Find dim, P_1d, Q_1d
114   data->max_P_1d = 0;
115   for (CeedInt i = 0; i < num_input_fields; i++) {
116     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
117     if (basis != CEED_BASIS_COLLOCATED) {
118       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
119       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
120 
121       // Collect dim, P_1d, and Q_1d
122       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
123       bool isTensor;
124       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
125       if (isTensor) {
126         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
127         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
128         if (P_1d > data->max_P_1d) data->max_P_1d = P_1d;
129       } else {
130         // LCOV_EXCL_START
131         return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
132         // LCOV_EXCL_STOP
133       }
134     }
135   }
136   // Check output bases for Q_1d, dim as well
137   //   The only input basis might be CEED_BASIS_COLLOCATED
138   for (CeedInt i = 0; i < num_output_fields; i++) {
139     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
140 
141     if (basis != CEED_BASIS_COLLOCATED) {
142       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
143       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
144 
145       // Collect Q_1d
146       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
147       bool isTensor;
148       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
149       if (isTensor) {
150         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
151       } else {
152         // LCOV_EXCL_START
153         return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
154         // LCOV_EXCL_STOP
155       }
156     }
157   }
158   data->dim  = dim;
159   data->Q_1d = Q_1d;
160 
161   // Only use 3D collocated gradient parallelization strategy when gradient is computed
162   // TODO: put in a function?
163   bool use_collograd_parallelization = false;
164   if (dim == 3) {
165     bool was_grad_found = false;
166     for (CeedInt i = 0; i < num_input_fields; i++) {
167       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
168       if (eval_mode == CEED_EVAL_GRAD) {
169         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
170         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
171         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
172         was_grad_found                = true;
173       }
174     }
175     for (CeedInt i = 0; i < num_output_fields; i++) {
176       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
177       if (eval_mode == CEED_EVAL_GRAD) {
178         CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
179         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
180         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
181         was_grad_found                = true;
182       }
183     }
184   }
185 
186   // Define CEED_Q_VLA
187   code << "\n#undef CEED_Q_VLA\n";
188   if (dim != 3 || use_collograd_parallelization) {
189     code << "#define CEED_Q_VLA 1\n\n";
190   } else {
191     code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
192   }
193 
194   code << q_function_source;
195 
196   // Setup
197   code << "\n// -----------------------------------------------------------------------------\n";
198   code << "\nextern \"C\" __global__ void " << operator_name
199        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar* W) {\n";
200   for (CeedInt i = 0; i < num_input_fields; i++) {
201     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
202     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
203       code << "  const CeedScalar* d_u_" << i << " = fields.inputs[" << i << "];\n";
204     }
205   }
206 
207   for (CeedInt i = 0; i < num_output_fields; i++) {
208     code << "  CeedScalar* d_v_" << i << " = fields.outputs[" << i << "];\n";
209   }
210 
211   code << "  const CeedInt dim = " << dim << ";\n";
212   code << "  const CeedInt Q_1d = " << Q_1d << ";\n";
213 
214   code << "  extern __shared__ CeedScalar slice[];\n";
215   // TODO put in a function? InitSharedData_Cuda?
216   code << "  SharedData_Cuda data;\n";
217   code << "  data.t_id_x = threadIdx.x;\n";
218   code << "  data.t_id_y = threadIdx.y;\n";
219   code << "  data.t_id_z = threadIdx.z;\n";
220   code << "  data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
221   code << "  data.slice = slice+data.t_id_z*T_1D" << (dim > 1 ? "*T_1D" : "") << ";\n";
222 
223   code << "\n  // -- Input field constants and basis data --\n";
224   // TODO: Put in a function?
225   // Initialize constants, and matrices B and G
226   for (CeedInt i = 0; i < num_input_fields; i++) {
227     code << "  // ---- Input field " << i << " ----\n";
228     // Get elem_size, eval_mode, num_comp
229     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
230     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
231     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
232     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
233 
234     // Set field constants
235     if (eval_mode != CEED_EVAL_WEIGHT) {
236       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
237       if (basis != CEED_BASIS_COLLOCATED) {
238         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
239         code << "  const CeedInt P_in_" << i << " = " << P_1d << ";\n";
240       } else {
241         code << "  const CeedInt P_in_" << i << " = " << Q_1d << ";\n";
242       }
243       code << "  const CeedInt num_comp_in_" << i << " = " << num_comp << ";\n";
244     }
245 
246     // Load basis data
247     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
248     switch (eval_mode) {
249       case CEED_EVAL_NONE:
250         break;
251       case CEED_EVAL_INTERP:
252         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
253         data->B.inputs[i] = basis_data->d_interp_1d;
254         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
255         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
256         break;
257       case CEED_EVAL_GRAD:
258         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
259         data->B.inputs[i] = basis_data->d_interp_1d;
260         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
261         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
262         if (use_collograd_parallelization) {
263           data->G.inputs[i] = basis_data->d_collo_grad_1d;
264           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * Q_1d << "];\n";
265           code << "  loadMatrix<Q_1d,Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i << ");\n";
266         } else {
267           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
268           data->G.inputs[i]   = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
269           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
270           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_in_" + std::to_string(i))) << ",Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i
271                << ");\n";
272         }
273         break;
274       case CEED_EVAL_WEIGHT:
275         break;  // No action
276       case CEED_EVAL_DIV:
277         break;  // TODO: Not implemented
278       case CEED_EVAL_CURL:
279         break;  // TODO: Not implemented
280     }
281   }
282 
283   code << "\n  // -- Output field constants and basis data --\n";
284   for (CeedInt i = 0; i < num_output_fields; i++) {
285     code << "  // ---- Output field " << i << " ----\n";
286     // Get elem_size, eval_mode, num_comp
287     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
288     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
289     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
290     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
291 
292     // Set field constants
293     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
294     if (basis != CEED_BASIS_COLLOCATED) {
295       CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
296       code << "  const CeedInt P_out_" << i << " = " << P_1d << ";\n";
297     } else {
298       code << "  const CeedInt P_out_" << i << " = " << Q_1d << ";\n";
299     }
300     code << "  const CeedInt num_comp_out_" << i << " = " << num_comp << ";\n";
301 
302     // Load basis data
303     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
304     switch (eval_mode) {
305       case CEED_EVAL_NONE:
306         break;  // No action
307       case CEED_EVAL_INTERP:
308         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
309         data->B.outputs[i] = basis_data->d_interp_1d;
310         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
311         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
312         break;
313       case CEED_EVAL_GRAD:
314         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
315         data->B.outputs[i] = basis_data->d_interp_1d;
316         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
317         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
318         if (use_collograd_parallelization) {
319           data->G.outputs[i] = basis_data->d_collo_grad_1d;
320           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * Q_1d << "];\n";
321           code << "  loadMatrix<Q_1d,Q_1d>(data, G.outputs[" << i << "], s_G_out_" << i << ");\n";
322         } else {
323           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
324           data->G.outputs[i]  = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
325           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
326           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_out_" + std::to_string(i))) << ",Q_1d>(data, G.outputs[" << i << "], s_G_out_"
327                << i << ");\n";
328         }
329         break;
330       // LCOV_EXCL_START
331       case CEED_EVAL_WEIGHT: {
332         Ceed ceed;
333         CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
334         return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
335         break;  // Should not occur
336       }
337       case CEED_EVAL_DIV:
338         break;  // TODO: Not implemented
339       case CEED_EVAL_CURL:
340         break;  // TODO: Not implemented
341                 // LCOV_EXCL_STOP
342     }
343   }
344   code << "\n  // -- Element loop --\n";
345   code << "  __syncthreads();\n";
346   code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
347   // Input basis apply if needed
348   // Generate the correct eval mode code for each input
349   code << "    // -- Input field restrictions and basis actions --\n";
350   for (CeedInt i = 0; i < num_input_fields; i++) {
351     code << "    // ---- Input field " << i << " ----\n";
352     // Get elem_size, eval_mode, num_comp
353     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
354     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
355     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
356     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
357 
358     // TODO: put in a function?
359     // Restriction
360     if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_collograd_parallelization)) {
361       code << "    CeedScalar r_u_" << i << "[num_comp_in_" << i << "*P_in_" << i << "];\n";
362 
363       bool is_strided;
364       CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
365       if (!is_strided) {
366         CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
367         code << "    const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
368         CeedInt comp_stride;
369         CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
370         code << "    // CompStride: " << comp_stride << "\n";
371         CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
372         data->indices.inputs[i] = restr_data->d_ind;
373         code << "    readDofsOffset" << dim << "d<num_comp_in_" << i << ", " << comp_stride << ", P_in_" << i << ">(data, lsize_in_" << i
374              << ", elem, indices.inputs[" << i << "], d_u_" << i << ", r_u_" << i << ");\n";
375       } else {
376         bool backendstrides;
377         CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
378         CeedInt num_elem;
379         CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
380         CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
381         if (!backendstrides) {
382           CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
383         }
384         code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
385         code << "    readDofsStrided" << dim << "d<num_comp_in_" << i << ",P_in_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
386              << ">(data, elem, d_u_" << i << ", r_u_" << i << ");\n";
387       }
388     }
389 
390     // TODO: put in a function?
391     // Basis action
392     code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
393     switch (eval_mode) {
394       case CEED_EVAL_NONE:
395         if (!use_collograd_parallelization) {
396           code << "    CeedScalar* r_t_" << i << " = r_u_" << i << ";\n";
397         }
398         break;
399       case CEED_EVAL_INTERP:
400         code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
401         code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_"
402              << i << ", r_t_" << i << ");\n";
403         break;
404       case CEED_EVAL_GRAD:
405         if (use_collograd_parallelization) {
406           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
407           code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i
408                << ", s_B_in_" << i << ", r_t_" << i << ");\n";
409         } else {
410           CeedInt P_1d;
411           CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
412           CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
413           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*dim*Q_1d];\n";
414           code << "    Grad" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_in_" << i
415                << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_" << i << ", s_G_in_" << i << ", r_t_" << i << ");\n";
416         }
417         break;
418       case CEED_EVAL_WEIGHT:
419         code << "    CeedScalar r_t_" << i << "[Q_1d];\n";
420         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
421         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
422         data->W = basis_data->d_q_weight_1d;
423         code << "    Weight" << (dim > 1 ? "Tensor" : "") << dim << "d<Q_1d>(data, W, r_t_" << i << ");\n";
424         break;  // No action
425       case CEED_EVAL_DIV:
426         break;  // TODO: Not implemented
427       case CEED_EVAL_CURL:
428         break;  // TODO: Not implemented
429     }
430   }
431 
432   // TODO: put in a function + separate collograd logic
433   // Q function
434   code << "\n    // -- Output field setup --\n";
435   for (CeedInt i = 0; i < num_output_fields; i++) {
436     code << "\n    // ---- Output field " << i << " ----\n";
437     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
438     if (eval_mode == CEED_EVAL_GRAD) {
439       if (use_collograd_parallelization) {
440         // Accumulator for gradient slices
441         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
442         code << "    for (CeedInt i = 0; i < num_comp_out_" << i << "; i++) {\n";
443         code << "      for (CeedInt j = 0; j < Q_1d; ++j) {\n";
444         code << "        r_tt_" << i << "[j + i*Q_1d] = 0.0;\n";
445         code << "      }\n";
446         code << "    }\n";
447       } else {
448         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*dim*Q_1d];\n";
449       }
450     }
451     if (eval_mode == CEED_EVAL_NONE || eval_mode == CEED_EVAL_INTERP) {
452       code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
453     }
454   }
455   // We treat quadrature points per slice in 3d to save registers
456   if (use_collograd_parallelization) {
457     code << "\n    // Note: Using planes of 3D elements\n";
458     code << "#pragma unroll\n";
459     code << "    for (CeedInt q = 0; q < Q_1d; q++) {\n";
460     code << "      // -- Input fields --\n";
461     for (CeedInt i = 0; i < num_input_fields; i++) {
462       code << "      // ---- Input field " << i << " ----\n";
463       // Get elem_size, eval_mode, num_comp
464       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
465       // Basis action
466       code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
467       switch (eval_mode) {
468         case CEED_EVAL_NONE:
469           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
470 
471           bool is_strided;
472           CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
473           CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
474           if (!is_strided) {
475             CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
476             code << "      const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
477             CeedInt comp_stride;
478             CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
479             code << "      // CompStride: " << comp_stride << "\n";
480             CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
481             data->indices.inputs[i] = restr_data->d_ind;
482             code << "      readSliceQuadsOffset"
483                  << "3d<num_comp_in_" << i << ", " << comp_stride << ", Q_1d>(data, lsize_in_" << i << ", elem, q, indices.inputs[" << i << "], d_u_"
484                  << i << ", r_q_" << i << ");\n";
485           } else {
486             CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
487             bool backendstrides;
488             CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
489             CeedInt num_elem;
490             CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
491             CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
492             if (!backendstrides) {
493               CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
494             }
495             code << "      // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
496             code << "      readSliceQuadsStrided"
497                  << "3d<num_comp_in_" << i
498                  << ",Q_1d"
499                     ","
500                  << strides[0] << "," << strides[1] << "," << strides[2] << ">(data, elem, q, d_u_" << i << ", r_q_" << i << ");\n";
501           }
502           break;
503         case CEED_EVAL_INTERP:
504           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
505           code << "      for (CeedInt j = 0; j < num_comp_in_" << i << " ; ++j) {\n";
506           code << "        r_q_" << i << "[j] = r_t_" << i << "[q + j*Q_1d];\n";
507           code << "      }\n";
508           break;
509         case CEED_EVAL_GRAD:
510           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "*dim];\n";
511           code << "      gradCollo3d<num_comp_in_" << i << ",Q_1d>(data, q, r_t_" << i << ", s_G_in_" << i << ", r_q_" << i << ");\n";
512           break;
513         case CEED_EVAL_WEIGHT:
514           code << "      CeedScalar r_q_" << i << "[1];\n";
515           code << "      r_q_" << i << "[0] = r_t_" << i << "[q];\n";
516           break;  // No action
517         case CEED_EVAL_DIV:
518           break;  // TODO: Not implemented
519         case CEED_EVAL_CURL:
520           break;  // TODO: Not implemented
521       }
522     }
523     code << "\n      // -- Output fields --\n";
524     for (CeedInt i = 0; i < num_output_fields; i++) {
525       code << "      // ---- Output field " << i << " ----\n";
526       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
527       // Basis action
528       switch (eval_mode) {
529         case CEED_EVAL_NONE:
530           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
531           break;  // No action
532         case CEED_EVAL_INTERP:
533           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
534           break;
535         case CEED_EVAL_GRAD:
536           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "*dim];\n";
537           break;
538         case CEED_EVAL_WEIGHT:
539           break;  // Should not occur
540         case CEED_EVAL_DIV:
541           break;  // TODO: Not implemented
542         case CEED_EVAL_CURL:
543           break;  // TODO: Not implemented
544       }
545     }
546   } else {
547     code << "\n      // Note: Using full elements\n";
548     code << "      // -- Input fields --\n";
549     for (CeedInt i = 0; i < num_input_fields; i++) {
550       code << "      // ---- Input field " << i << " ----\n";
551       code << "      CeedScalar* r_q_" << i << " = r_t_" << i << ";\n";
552     }
553     code << "      // -- Output fields --\n";
554     for (CeedInt i = 0; i < num_output_fields; i++) {
555       code << "      // ---- Output field " << i << " ----\n";
556       code << "      CeedScalar* r_qq_" << i << " = r_tt_" << i << ";\n";
557     }
558   }
559   code << "\n      // -- QFunction Inputs and outputs --\n";
560   code << "      CeedScalar* in[" << num_input_fields << "];\n";
561   for (CeedInt i = 0; i < num_input_fields; i++) {
562     code << "      // ---- Input field " << i << " ----\n";
563     code << "      in[" << i << "] = r_q_" << i << ";\n";
564   }
565   code << "      CeedScalar* out[" << num_output_fields << "];\n";
566   for (CeedInt i = 0; i < num_output_fields; i++) {
567     code << "      // ---- Output field " << i << " ----\n";
568     code << "      out[" << i << "] = r_qq_" << i << ";\n";
569   }
570   code << "\n      // -- Apply QFunction --\n";
571   code << "      " << q_function_name << "(ctx, ";
572   if (dim != 3 || use_collograd_parallelization) {
573     code << "1";
574   } else {
575     code << "Q_1d";
576   }
577   code << ", in, out);\n";
578   if (use_collograd_parallelization) {
579     code << "      // -- Output fields --\n";
    // Emit the per-quadrature-point transpose of Q-function outputs back into
    // the tensor-contraction registers (r_qq_* -> r_tt_*).  NOTE(review): the
    // "q" index in the generated code indicates this is emitted inside a
    // quadrature-point loop opened earlier in this function (collocated-
    // gradient path) -- confirm against the unseen portion above.
    for (CeedInt i = 0; i < num_output_fields; i++) {
      code << "      // ---- Output field " << i << " ----\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // No basis action: copy the Q-function output for this quadrature
          // point straight into the per-component staging registers
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;  // No action
        case CEED_EVAL_INTERP:
          // Same staged copy as NONE here; the interpolation transpose itself
          // is emitted in the per-element pass below
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          // Transpose of the collocated gradient at this quadrature point
          code << "      gradColloTranspose3d<num_comp_out_" << i << ",Q_1d>(data, q, r_qq_" << i << ", s_G_out_" << i << ", r_tt_" << i << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
          break;  // TODO: Not implemented
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
      }
    }
    code << "    }\n";  // closes the generated quadrature-point loop
  }

  // Output basis apply if needed
  // Generate the correct eval mode code for each output
  code << "\n    // -- Output field basis action and restrictions --\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "    // ---- Output field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
    CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
    // TODO put in a function
    // Basis action
    code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        // No basis action: alias the staged registers directly as the
        // E-vector values
        code << "    CeedScalar* r_v_" << i << " = r_tt_" << i << ";\n";
        break;  // No action
      case CEED_EVAL_INTERP:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
             << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        if (use_collograd_parallelization) {
          // Gradient transpose was already applied per quadrature point above,
          // so only the interpolation transpose remains here
          code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
               << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        } else {
          // NOTE(review): this P_1d intentionally shadows the function-scope
          // P_1d -- it is the per-field 1D node count for this output basis
          CeedInt P_1d;
          CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
          CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
          // The "Collocated" kernel variant is only selected in 3D when
          // Q_1d >= P_1d
          code << "    GradTranspose" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_out_" << i
               << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i << ", s_B_out_" << i << ", s_G_out_" << i << ", r_v_" << i << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT: {
        // Quadrature weights are input-only; reaching this is a caller error
        Ceed ceed;
        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      }
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
                // LCOV_EXCL_STOP
    }
    // TODO put in a function
    // Restriction
    bool is_strided;
    CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
    if (!is_strided) {
      // Indexed restriction: scatter element values to the L-vector through
      // the restriction's device index array
      CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
      code << "    const CeedInt lsize_out_" << i << " = " << lsize << ";\n";
      CeedInt comp_stride;
      CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
      code << "    // CompStride: " << comp_stride << "\n";
      CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
      // Stash the device index pointer; presumably handed to the generated
      // kernel at launch via data->indices -- confirm in the apply path
      data->indices.outputs[i] = restr_data->d_ind;
      code << "    writeDofsOffset" << dim << "d<num_comp_out_" << i << ", " << comp_stride << ", P_out_" << i << ">(data, lsize_out_" << i
           << ", elem, indices.outputs[" << i << "], r_v_" << i << ", d_v_" << i << ");\n";
    } else {
      // Strided restriction: no index array needed; strides are baked into
      // the generated kernel as template parameters
      bool has_backend_strides;
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &has_backend_strides));
      CeedInt num_elem;
      CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
      // Backend default strides; replaced by the restriction's explicit
      // strides when it does not use backend strides
      CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
      }
      code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
      code << "    writeDofsStrided" << dim << "d<num_comp_out_" << i << ",P_out_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
           << ">(data, elem, r_v_" << i << ", d_v_" << i << ");\n";
    }
  }

  code << "  }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  // View kernel for debugging
  CeedDebug256(ceed, 2, "Generated Operator Kernels:\n");
  CeedDebug(ceed, code.str().c_str());

  // JIT-compile the generated source; the compile-time constant T_1D is set to
  // max(Q_1d, data->max_P_1d) so it covers both quadrature and node sizes
  CeedCallBackend(CeedCompileCuda(ceed, code.str().c_str(), &data->module, 1, "T_1D", CeedIntMax(Q_1d, data->max_P_1d)));
  CeedCallBackend(CeedGetKernelCuda(ceed, data->module, operator_name.c_str(), &data->op));

  // Mark setup complete so the early-return at the top of this function fires
  // on subsequent calls
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  return CEED_ERROR_SUCCESS;
}
701 //------------------------------------------------------------------------------
702