xref: /libCEED/rust/libceed-sys/c-src/backends/cuda-gen/ceed-cuda-gen-operator-build.cpp (revision cadb81b52bdcacffa10c3c3dcca32a680606c1bc)
1 // Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #define CEED_DEBUG_COLOR 12
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/gen-tools.h>
13 #include <ceed/jit-tools.h>
14 #include <cuda_runtime.h>
15 
16 #include <iostream>
17 #include <sstream>
18 #include <string>
19 
20 #include "../cuda-ref/ceed-cuda-ref.h"
21 #include "../cuda-shared/ceed-cuda-shared.h"
22 #include "../cuda/ceed-cuda-common.h"
23 #include "../cuda/ceed-cuda-compile.h"
24 #include "ceed-cuda-gen.h"
25 
// Describes an earlier field whose shared-memory basis matrices (s_B_*/s_G_*)
// can be aliased by a later field instead of being loaded again.
struct FieldReuse_Cuda {
  CeedInt      index;      // index of the field to reuse; -1 means no reuse
  bool         is_input;   // true if the reused field is an input field
  CeedEvalMode eval_mode;  // eval mode of the reused field
};
31 
32 //------------------------------------------------------------------------------
33 // Determine type of operator
34 //------------------------------------------------------------------------------
35 static int CeedOperatorBuildKernelData_Cuda_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
36                                                 CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields,
37                                                 CeedQFunctionField *qf_output_fields, CeedInt *max_P, CeedInt *max_P_1d, CeedInt *Q, CeedInt *Q_1d,
38                                                 CeedInt *max_dim, bool *is_all_tensor, bool *use_3d_slices) {
39   // Check if all are tensor
40   *is_all_tensor = true;
41   for (CeedInt i = 0; i < num_input_fields; i++) {
42     CeedBasis basis;
43 
44     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
45     if (basis != CEED_BASIS_NONE) {
46       bool is_field_tensor;
47 
48       CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
49       *is_all_tensor = *is_all_tensor && is_field_tensor;
50     }
51     CeedCallBackend(CeedBasisDestroy(&basis));
52   }
53   for (CeedInt i = 0; i < num_output_fields; i++) {
54     CeedBasis basis;
55 
56     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
57     if (basis != CEED_BASIS_NONE) {
58       bool is_field_tensor;
59 
60       CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
61       *is_all_tensor = *is_all_tensor && is_field_tensor;
62     }
63     CeedCallBackend(CeedBasisDestroy(&basis));
64   }
65 
66   // Find max_P, max_P_1d, Q, and Q_1d
67   bool is_all_3d = true;
68 
69   *max_P    = 0;
70   *max_P_1d = 0;
71   *Q        = 0;
72   *Q_1d     = 0;
73   for (CeedInt i = 0; i < num_input_fields; i++) {
74     CeedBasis basis;
75 
76     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
77     if (basis != CEED_BASIS_NONE) {
78       bool    is_field_tensor;
79       CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;
80 
81       // Check if 3D
82       CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
83       is_all_3d = is_all_3d && (field_dim == 3);
84       *max_dim  = CeedIntMax(*max_dim, field_dim);
85 
86       // Collect P, P_1d, Q, and Q_1d
87       CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
88       *max_P = CeedIntMax(*max_P, field_P);
89       CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
90       if (is_field_tensor) {
91         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
92         *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
93       }
94       CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
95       CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
96       *Q = field_Q;
97       if (is_field_tensor) {
98         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
99         CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
100         *Q_1d = field_Q_1d;
101       }
102     }
103     CeedCallBackend(CeedBasisDestroy(&basis));
104   }
105   for (CeedInt i = 0; i < num_output_fields; i++) {
106     CeedBasis basis;
107 
108     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
109     if (basis != CEED_BASIS_NONE) {
110       bool    is_field_tensor;
111       CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;
112 
113       // Check if 3D
114       CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
115       is_all_3d = is_all_3d && (field_dim == 3);
116       *max_dim  = CeedIntMax(*max_dim, field_dim);
117 
118       // Collect P, P_1d, Q, and Q_1d
119       CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
120       *max_P = CeedIntMax(*max_P, field_P);
121       CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
122       if (is_field_tensor) {
123         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
124         *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
125       }
126       CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
127       CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
128       *Q = field_Q;
129       if (is_field_tensor) {
130         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
131         CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
132         *Q_1d = field_Q_1d;
133       }
134     }
135     CeedCallBackend(CeedBasisDestroy(&basis));
136   }
137 
138   // Only use 3D collocated gradient parallelization strategy when gradient is computed
139   *use_3d_slices = false;
140   if (is_all_3d && *is_all_tensor) {
141     bool was_grad_found = false;
142 
143     for (CeedInt i = 0; i < num_input_fields; i++) {
144       CeedEvalMode eval_mode;
145 
146       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
147       if (eval_mode == CEED_EVAL_GRAD) {
148         CeedBasis_Cuda_shared *basis_data;
149         CeedBasis              basis;
150 
151         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
152         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
153         *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
154         was_grad_found = true;
155         CeedCallBackend(CeedBasisDestroy(&basis));
156       }
157     }
158     for (CeedInt i = 0; i < num_output_fields; i++) {
159       CeedEvalMode eval_mode;
160 
161       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
162       if (eval_mode == CEED_EVAL_GRAD) {
163         CeedBasis_Cuda_shared *basis_data;
164         CeedBasis              basis;
165 
166         CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
167         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
168         *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
169         was_grad_found = true;
170         CeedCallBackend(CeedBasisDestroy(&basis));
171       }
172     }
173   }
174   return CEED_ERROR_SUCCESS;
175 }
176 
177 //------------------------------------------------------------------------------
178 // Setup fields
179 //------------------------------------------------------------------------------
180 static int CeedOperatorBuildKernelFieldData_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
181                                                      CeedOperatorField op_field, CeedQFunctionField qf_field, FieldReuse_Cuda field_reuse,
182                                                      CeedInt max_dim, CeedInt Q, CeedInt Q_1d, bool is_input, bool is_all_tensor, bool is_at_points,
183                                                      bool use_3d_slices, bool skip_active_load) {
184   bool      is_tensor = true, is_active = true;
185   CeedBasis basis;
186 
187   CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
188   if (basis != CEED_BASIS_NONE) CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
189   {
190     CeedVector vec;
191 
192     CeedCallBackend(CeedOperatorFieldGetVector(op_field, &vec));
193     is_active = vec == CEED_VECTOR_ACTIVE;
194     CeedCallBackend(CeedVectorDestroy(&vec));
195   }
196 
197   const char            *field_name;
198   std::string            var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
199   std::string            P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
200   std::string            option_name = (is_input ? "inputs" : "outputs");
201   CeedEvalMode           eval_mode   = CEED_EVAL_NONE;
202   CeedInt                elem_size = 0, num_comp = 0, dim = max_dim, P_1d = 0;
203   CeedElemRestriction    elem_rstr;
204   CeedBasis_Cuda_shared *basis_data;
205 
206   // Field reuse info
207   bool use_previous_field = field_reuse.index != -1;
208 
209   CeedCallBackend(CeedOperatorFieldGetName(op_field, &field_name));
210   code << tab << "// -- " << (is_input ? "Input" : "Output") << " field " << i << ": " << field_name << "\n";
211 
212   // Get field data
213   CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
214   if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
215     CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
216     CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
217   }
218   CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
219   if (basis != CEED_BASIS_NONE) {
220     CeedCallBackend(CeedBasisGetData(basis, &basis_data));
221     CeedCallBackend(CeedBasisGetDimension(basis, &dim));
222     if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
223     else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
224   }
225   CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));
226 
227   // Set field constants
228   code << tab << "const CeedInt dim" << var_suffix << " = " << dim << ";\n";
229   if (is_tensor && !is_all_tensor) {
230     CeedInt P = 0;
231 
232     CeedCallBackend(CeedBasisGetNumNodes(basis, &P));
233     code << tab << "const CeedInt P" << var_suffix << " = " << (basis == CEED_BASIS_NONE ? Q : P) << ";\n";
234   }
235   code << tab << "const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n";
236   if (eval_mode != CEED_EVAL_WEIGHT) {
237     code << tab << "const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n";
238   }
239 
240   // Load basis data
241   code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
242   switch (eval_mode) {
243     case CEED_EVAL_NONE:
244       break;
245     case CEED_EVAL_INTERP:
246       if (is_at_points) {
247         // AtPoints
248         if (!basis_data->d_chebyshev_interp_1d) {
249           CeedSize    interp_bytes;
250           CeedScalar *chebyshev_interp_1d;
251 
252           interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
253           CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
254           CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
255           CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
256           CeedCallCuda(CeedBasisReturnCeed(basis),
257                        cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
258           CeedCallBackend(CeedFree(&chebyshev_interp_1d));
259         }
260         if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
261         else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
262       } else {
263         // Standard quadrature
264         if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
265         else data->B.outputs[i] = basis_data->d_interp_1d;
266       }
267       if (use_previous_field && !skip_active_load) {
268         std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));
269 
270         code << tab << "CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
271       } else {
272         bool is_collocated = false;
273 
274         CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));
275         if ((is_active && skip_active_load) || (is_collocated && !is_at_points)) {
276           code << tab << "CeedScalar *s_B" << var_suffix << " = NULL;\n";
277         } else {
278           code << tab << "__shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
279           code << tab << "LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
280         }
281       }
282       break;
283     case CEED_EVAL_GRAD:
284       if (is_at_points) {
285         // AtPoints
286         if (!basis_data->d_chebyshev_interp_1d) {
287           CeedSize    interp_bytes;
288           CeedScalar *chebyshev_interp_1d;
289 
290           interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
291           CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
292           CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
293           CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
294           CeedCallCuda(CeedBasisReturnCeed(basis),
295                        cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
296           CeedCallBackend(CeedFree(&chebyshev_interp_1d));
297         }
298         if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
299         else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
300       } else {
301         // Standard quadrature
302         if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
303         else data->B.outputs[i] = basis_data->d_interp_1d;
304       }
305       if (is_tensor) {
306         if (use_previous_field && !skip_active_load) {
307           std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));
308 
309           code << tab << "CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
310         } else {
311           bool is_collocated = false;
312 
313           CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));
314           if ((is_active && skip_active_load) || (is_collocated && !is_at_points)) {
315             code << tab << "CeedScalar *s_B" << var_suffix << " = NULL;\n";
316           } else {
317             code << tab << "__shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
318             code << tab << "LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
319           }
320         }
321       }
322       if (is_at_points) break;  // No G mat for AtPoints
323       if (use_3d_slices) {
324         if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d;
325         else data->G.outputs[i] = basis_data->d_collo_grad_1d;
326         if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD && !skip_active_load) {
327           std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));
328 
329           code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
330         } else if (is_active && skip_active_load) {
331           code << tab << "CeedScalar *s_G" << var_suffix << " = NULL;\n";
332         } else {
333           code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
334           code << tab << "LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
335         }
336       } else {
337         bool has_collo_grad = basis_data->d_collo_grad_1d;
338 
339         if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
340         else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
341         if (has_collo_grad) {
342           if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD && !skip_active_load) {
343             std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));
344 
345             code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
346           } else if (is_active && skip_active_load) {
347             code << tab << "CeedScalar *s_G" << var_suffix << " = NULL;\n";
348           } else {
349             code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
350             code << tab << "LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
351           }
352         } else {
353           if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD && !skip_active_load) {
354             std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));
355 
356             code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
357           } else if (is_active && skip_active_load) {
358             code << tab << "CeedScalar *s_G" << var_suffix << " = NULL;\n";
359           } else {
360             code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim")
361                  << (is_tensor ? "" : var_suffix) << "];\n";
362             code << tab << "LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << (is_tensor ? "" : var_suffix) << ">(data, G."
363                  << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
364           }
365         }
366       }
367       break;
368     case CEED_EVAL_WEIGHT:
369       break;  // No action
370       // LCOV_EXCL_START
371     case CEED_EVAL_DIV:
372     case CEED_EVAL_CURL:
373       break;  // TODO: Not implemented
374               // LCOV_EXCL_STOP
375   }
376   CeedCallBackend(CeedBasisDestroy(&basis));
377   return CEED_ERROR_SUCCESS;
378 }
379 
380 //------------------------------------------------------------------------------
381 // Restriction
382 //------------------------------------------------------------------------------
383 static int CeedOperatorBuildKernelRestriction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
384                                                        CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field,
385                                                        CeedInt max_dim, CeedInt Q_1d, bool is_input, bool is_all_tensor, bool is_at_points,
386                                                        bool use_3d_slices) {
387   std::string               var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
388   std::string               P_name     = (is_all_tensor ? "P_1d" : "P") + var_suffix;
389   CeedEvalMode              eval_mode  = CEED_EVAL_NONE;
390   CeedInt                   elem_size = 0, num_comp = 0;
391   CeedSize                  l_size;
392   CeedRestrictionType       rstr_type = CEED_RESTRICTION_STANDARD;
393   CeedElemRestriction_Cuda *rstr_data;
394   CeedElemRestriction       elem_rstr;
395 
396   // Get field data
397   CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
398   if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
399     CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type));
400     CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
401     CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
402     CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
403   }
404   CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));
405 
406   // Restriction
407   if (is_input) {
408     // Input
409     if (field_input_buffer[i] != i) {
410       std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]);
411 
412       // Restriction was already done for previous input
413       code << tab << "CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n";
414     } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) {
415       if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) {
416         // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated
417         code << tab << "CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n";
418       } else if (rstr_type != CEED_RESTRICTION_POINTS) {
419         // Otherwise we're using the scratch space
420         code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
421       }
422       switch (rstr_type) {
423         case CEED_RESTRICTION_STANDARD: {
424           CeedInt comp_stride;
425 
426           CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
427           code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
428           CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
429           code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
430           data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
431           code << tab << "ReadLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", "
432                << P_name << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix
433                << ");\n";
434           break;
435         }
436         case CEED_RESTRICTION_STRIDED: {
437           bool    has_backend_strides;
438           CeedInt num_elem;
439 
440           CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
441           CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
442           CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
443 
444           if (!has_backend_strides) {
445             CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
446           }
447           code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1]
448                << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n";
449           code << tab << "ReadLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", strides"
450                << var_suffix << "_0, strides" << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, d" << var_suffix << ", r_e"
451                << var_suffix << ");\n";
452           break;
453         }
454         case CEED_RESTRICTION_POINTS: {
455           CeedInt comp_stride;
456 
457           CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
458           code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
459           data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
460           break;
461         }
462         // LCOV_EXCL_START
463         case CEED_RESTRICTION_ORIENTED:
464         case CEED_RESTRICTION_CURL_ORIENTED:
465           break;  // TODO: Not implemented
466                   // LCOV_EXCL_STOP
467       }
468     }
469   } else {
470     // Output
471     switch (rstr_type) {
472       case CEED_RESTRICTION_STANDARD: {
473         CeedInt comp_stride;
474 
475         CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
476         code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
477         CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
478         code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
479         data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
480         code << tab << "WriteLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", "
481              << P_name << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix
482              << ");\n";
483         break;
484       }
485       case CEED_RESTRICTION_STRIDED: {
486         bool    has_backend_strides;
487         CeedInt num_elem;
488 
489         CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
490         CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
491         CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
492 
493         if (!has_backend_strides) {
494           CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
495         }
496         code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1]
497              << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n";
498         code << tab << "WriteLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", strides"
499              << var_suffix << "_0, strides" << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, r_e" << var_suffix << ", d" << var_suffix
500              << ");\n";
501         break;
502       }
503       case CEED_RESTRICTION_POINTS:
504         data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
505         break;
506       // LCOV_EXCL_START
507       case CEED_RESTRICTION_ORIENTED:
508       case CEED_RESTRICTION_CURL_ORIENTED:
509         break;  // TODO: Not implemented
510                 // LCOV_EXCL_STOP
511     }
512   }
513   CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
514   return CEED_ERROR_SUCCESS;
515 }
516 
517 //------------------------------------------------------------------------------
518 // Basis
519 //------------------------------------------------------------------------------
520 static int CeedOperatorBuildKernelBasis_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
521                                                  CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt max_dim, CeedInt Q_1d,
522                                                  bool is_input, bool is_all_tensor, bool is_at_points, bool use_3d_slices) {
523   bool      is_tensor = true, is_collocated = true;
524   CeedBasis basis;
525   CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
526   CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
527   CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));
528 
529   std::string         var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
530   std::string         P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
531   CeedEvalMode        eval_mode = CEED_EVAL_NONE;
532   CeedInt             dim = max_dim, elem_size = 0, num_comp = 0, P_1d = 0;
533   CeedElemRestriction elem_rstr;
534 
535   // Get field data
536   CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
537   if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
538     CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
539     CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
540   }
541   CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
542   if (basis != CEED_BASIS_NONE) {
543     CeedCallBackend(CeedBasisGetDimension(basis, &dim));
544     if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
545     else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
546   }
547   CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));
548 
549   // Basis
550   code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
551   if (is_input) {
552     switch (eval_mode) {
553       case CEED_EVAL_NONE:
554         if (!use_3d_slices && !is_at_points) {
555           code << tab << "CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n";
556         }
557         break;
558       case CEED_EVAL_INTERP:
559         if (is_at_points) {
560           std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";
561 
562           code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
563           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
564                << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
565         } else {
566           std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
567                                                    std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
568                                                 : "InterpNonTensor";
569           std::string op_t_1d_name  = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
570 
571           code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
572           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
573                << var_suffix << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
574         }
575         break;
576       case CEED_EVAL_GRAD:
577         if (is_at_points) {
578           std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";
579 
580           code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
581           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
582                << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
583         } else if (use_3d_slices) {
584           std::string function_name =
585               (dim > 1 ? "InterpTensor" : "Interp") + std::string(is_collocated ? "CollocatedNodes" : "") + std::to_string(dim) + "d";
586 
587           code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
588           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
589                << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
590         } else if (is_tensor) {
591           bool        is_collocated_grad = dim == 3 && Q_1d >= P_1d;
592           std::string function_name =
593               (dim == 1 ? "Grad"
594                         : ("GradTensor" + std::string(is_collocated ? "CollocatedNodes" : (is_collocated_grad ? "Collocated" : ""))) +
595                               std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"));
596           std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
597 
598           code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*"
599                << (is_all_tensor && dim >= 3 ? Q_name : "1") << "];\n";
600           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
601                << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
602         } else {
603           std::string function_name = "GradNonTensor";
604 
605           code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
606           code << tab << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
607                << ", OP_T_1D>(data, r_e" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
608         }
609         break;
610       case CEED_EVAL_WEIGHT: {
611         if (is_at_points) {
612           code << tab << "// Nothing to do AtPoints\n";
613         } else {
614           CeedBasis_Cuda_shared *basis_data;
615           std::string            function_name = is_tensor
616                                                      ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
617                                                      : "WeightNonTensor";
618 
619           code << tab << "CeedScalar r_q" << var_suffix << "[" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
620           CeedCallBackend(CeedBasisGetData(basis, &basis_data));
621           data->W = basis_data->d_q_weight_1d;
622           code << tab << function_name << "<" << P_name << ", " << Q_name << ">(data, W, r_q" << var_suffix << ");\n";
623         }
624         break;
625       }
626       // LCOV_EXCL_START
627       case CEED_EVAL_DIV:
628       case CEED_EVAL_CURL:
629         break;  // TODO: Not implemented
630                 // LCOV_EXCL_STOP
631     }
632   } else {
633     switch (eval_mode) {
634       case CEED_EVAL_NONE:
635         code << tab << "CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n";
636         break;  // No action
637       case CEED_EVAL_INTERP:
638         code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
639         if (is_at_points) {
640           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";
641 
642           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
643                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
644         } else {
645           std::string function_name =
646               is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
647                            std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
648                         : "InterpTransposeNonTensor";
649           std::string op_t_1d_name = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
650 
651           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
652                << var_suffix << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
653         }
654         break;
655       case CEED_EVAL_GRAD:
656         code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
657         if (is_at_points) {
658           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";
659 
660           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
661                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
662         } else if (use_3d_slices) {
663           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
664                                       std::to_string(dim) + "d";
665 
666           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
667                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
668         } else if (is_tensor) {
669           bool        is_collocated_grad = dim == 3 && Q_1d >= P_1d;
670           std::string function_name =
671               (dim == 1 ? "GradTranspose"
672                         : ("GradTransposeTensor" + std::string(is_collocated ? "CollocatedNodes" : (is_collocated_grad ? "Collocated" : "")))) +
673               std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened");
674           std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
675 
676           code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
677                << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
678         } else {
679           std::string function_name = "GradTransposeNonTensor";
680 
681           code << tab << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
682                << ", OP_T_1D>(data, r_q" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
683         }
684         break;
685       // LCOV_EXCL_START
686       case CEED_EVAL_WEIGHT:
687         break;  // Should not occur
688       case CEED_EVAL_DIV:
689       case CEED_EVAL_CURL:
690         break;  // TODO: Not implemented
691                 // LCOV_EXCL_STOP
692     }
693   }
694   CeedCallBackend(CeedBasisDestroy(&basis));
695   return CEED_ERROR_SUCCESS;
696 }
697 
698 //------------------------------------------------------------------------------
699 // QFunction
700 //------------------------------------------------------------------------------
// Emit the QFunction section of the generated operator kernel source into `code`:
// declare per-thread registers (or accumulators) for the output fields, stage the
// input field values into per-thread registers, invoke the user QFunction, and,
// for the at-points and 3D-slices paths, map the QFunction outputs back to basis
// coefficients or collocated slice data.
//
// Inputs/outputs are staged in one of three modes:
//   - is_at_points:  loop over batches of points (variable number of points per element)
//   - use_3d_slices: loop over planes (1D slices in q) of 3D tensor elements
//   - otherwise:     all quadrature point data handled for the full element at once
//
// Side effect: device offset arrays needed at kernel launch time are recorded into
// data->indices while the source is generated (see the non-strided restriction case).
//
// Returns CEED_ERROR_SUCCESS on success.
static int CeedOperatorBuildKernelQFunction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt max_dim,
                                                     CeedInt max_num_points, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                                     CeedQFunctionField *qf_input_fields, CeedInt num_output_fields,
                                                     CeedOperatorField *op_output_fields, CeedQFunctionField *qf_output_fields,
                                                     std::string qfunction_name, CeedInt Q_1d, bool is_all_tensor, bool is_at_points,
                                                     bool use_3d_slices) {
  std::string         Q_name    = is_all_tensor ? "Q_1d" : "Q";
  CeedEvalMode        eval_mode = CEED_EVAL_NONE;
  CeedElemRestriction elem_rstr;

  // Setup output arrays -- declare the per-thread quadrature registers (r_q_*) or
  // accumulators (r_c_*) that hold each output field's data across the QFunction call
  code << "\n";
  code << tab << "// -- Output field setup\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;
    std::string var_suffix = "_out_" + std::to_string(i);

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        if (is_at_points) {
          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n";
        } else {
          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1")
               << "];\n";
        }
        break;
      case CEED_EVAL_INTERP:
        if (is_at_points) {
          // Accumulator for point data, zero-initialized; contributions are summed across point batches
          code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n";
          code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "; i++) r_c" << var_suffix
               << "[i] = 0.0;\n";
        } else {
          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1")
               << "];\n";
        }
        break;
      case CEED_EVAL_GRAD:
        if (is_at_points) {
          // Accumulator for point data, zero-initialized; contributions are summed across point batches
          code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n";
          code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "; i++) r_c" << var_suffix
               << "[i] = 0.0;\n";
        } else if (use_3d_slices) {
          // Accumulator for gradient slices, zero-initialized; filled one q-plane at a time below
          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
          code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) r_q" << var_suffix << "[i] = 0.0;\n";
        } else {
          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*"
               << (is_all_tensor && (max_dim >= 3) ? Q_name : "1") << "];\n";
        }
        break;
      case CEED_EVAL_WEIGHT:
        break;
        // LCOV_EXCL_START
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
                // LCOV_EXCL_STOP
    }
  }

  // Stage QFunction inputs (r_s_*) according to the execution mode
  if (is_at_points) {
    // We need to handle batches of points
    code << "\n";
    code << tab << "// Note: Using batches of points\n";
    // Round the trip count up to a multiple of the thread block size so the loop
    // bound is uniform across the block (padding threads wrap via p = i % max_num_points)
    code << tab << "const CeedInt point_loop_bound = (blockDim.x*blockDim.y) * ceil((1.0*max_num_points) / (blockDim.x*blockDim.y));\n\n";
    code << tab << "#pragma unroll\n";
    code << tab << "for (CeedInt i = threadIdx.x + threadIdx.y*blockDim.x; i < point_loop_bound; i += blockDim.x*blockDim.y) {\n";
    tab.push();
    code << tab << "const CeedInt p = i % max_num_points;\n\n";

    code << tab << "// -- Coordinates\n";
    code << tab << "CeedScalar r_x[max_dim];\n";
    code << tab << "ReadPoint<max_dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n";

    code << tab << "// -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << tab << "// ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action: evaluate basis/coefficient data at the current point into r_s
      code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << tab << "ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix
               << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_INTERP:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << tab << "InterpAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_GRAD:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          code << tab << "GradAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          // No quadrature weights for arbitrary points; emit a unit weight
          code << tab << "CeedScalar r_s" << var_suffix << "[1];\n";
          code << tab << "r_s" << var_suffix << "[0] = 1.0;\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
    code << "\n";
    code << tab << "// -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action: declare the per-point output registers the QFunction writes into
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_INTERP:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }

  } else if (use_3d_slices) {
    // We treat quadrature points per slice in 3d to save registers
    code << "\n";
    code << tab << "// Note: Using planes of 3D elements\n";
    code << tab << "#pragma unroll\n";
    code << tab << "for (CeedInt q = 0; q < " << Q_name << "; q++) {\n";
    tab.push();
    code << tab << "// -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << tab << "// ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action: load this field's slice-q values into r_s
      code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          bool is_strided;

          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";

          // EVAL_NONE slices are read directly from the L-vector, so the read must
          // match the restriction layout (strided vs offset-indexed)
          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
          if (is_strided) {
            bool    has_backend_strides;
            CeedInt num_elem, elem_size;

            CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
            CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
            CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
            CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

            // User-provided strides override the default backend striding above
            if (!has_backend_strides) {
              CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
            }
            code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1]
                 << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n";
            code << tab << "ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", strides" << var_suffix << "_0, strides"
                 << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n";
          } else {
            CeedSize                  l_size = 0;
            CeedInt                   comp_stride;
            CeedElemRestriction_Cuda *rstr_data;

            CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
            code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
            CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
            code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
            CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
            // Record the device offsets pointer so the kernel launch can pass it in
            data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
            code << tab << "ReadEVecSliceStandard3d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", " << Q_name << ">(data, l_size"
                 << var_suffix << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n";
          }
          CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
          break;
        case CEED_EVAL_INTERP:
          // Copy the q-th plane out of the interpolated registers
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n";
          tab.push();
          code << tab << "r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n";
          tab.pop();
          code << tab << "}\n";
          break;
        case CEED_EVAL_GRAD:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          code << tab << "GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_q" << var_suffix << ", s_G"
               << var_suffix << ", r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          code << tab << "CeedScalar r_s" << var_suffix << "[1];\n";
          code << tab << "r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
    code << "\n";
    code << tab << "// -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action: declare the per-slice output registers the QFunction writes into
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_INTERP:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  } else {
    code << "\n";
    code << tab << "// Note: Using full elements\n";
    code << tab << "{\n";
    tab.push();
    // Full-element path: the quadrature registers already hold the data, so the
    // QFunction input/output pointers simply alias them
    code << tab << "// -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << tab << "// ---- Input field " << i << ": " << field_name << "\n";
      code << tab << "CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n";
    }
    code << tab << "// -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
      code << tab << "CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n";
    }
  }

  // Input and output buffers -- pointer arrays passed to the QFunction
  // (sized at least 1 so the declarations stay valid with zero fields)
  code << "\n";
  code << tab << "// -- QFunction inputs and outputs\n";
  code << tab << "// ---- Inputs\n";
  code << tab << "CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
    code << tab << "// ------ Input field " << i << ": " << field_name << "\n";
    code << tab << "inputs[" << i << "] = r_s_in_" << i << ";\n";
  }
  code << tab << "// ---- Outputs\n";
  code << tab << "CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << tab << "// ------ Output field " << i << ": " << field_name << "\n";
    code << tab << "outputs[" << i << "] = r_s_out_" << i << ";\n";
  }

  // Apply QFunction
  code << "\n";
  code << tab << "// -- Apply QFunction\n";
  code << tab << "" << qfunction_name << "(ctx, ";
  // Each generated call covers one quadrature point, except the full-element 3D
  // all-tensor case where one thread carries a column of Q_1d points
  if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
    code << "1";
  } else {
    code << Q_name;
  }
  code << ", inputs, outputs);\n";

  if (is_at_points) {
    // Map back to coefficients
    code << "\n";
    code << tab << "// -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action: accumulate point outputs back into the coefficient registers
      code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE: {
          CeedInt             comp_stride;
          CeedElemRestriction elem_rstr;

          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
          code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          code << tab << "WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix
               << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]"
               << ", r_s" << var_suffix << ", d" << var_suffix << ");\n";
          break;
        }
        case CEED_EVAL_INTERP:
          // Zero contributions from padding threads beyond this element's point count
          code << tab << "if (i >= points.num_per_elem[elem]) {\n";
          tab.push();
          code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n";
          tab.pop();
          code << tab << "}\n";
          code << tab << "InterpTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n";
          break;
        case CEED_EVAL_GRAD:
          // Zero contributions from padding threads beyond this element's point count
          code << tab << "if (i >= points.num_per_elem[elem]) {\n";
          tab.push();
          code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n";
          tab.pop();
          code << tab << "}\n";
          code << tab << "GradTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  } else if (use_3d_slices) {
    // Copy or apply transpose grad, if needed
    code << "\n";
    code << tab << "// -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action: store this plane's QFunction outputs into the full registers
      code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          tab.push();
          code << tab << "r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          tab.pop();
          code << tab << "}\n";
          break;
        case CEED_EVAL_INTERP:
          code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          tab.push();
          code << tab << "r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          tab.pop();
          code << tab << "}\n";
          break;
        case CEED_EVAL_GRAD:
          code << tab << "GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_s" << var_suffix << ", s_G"
               << var_suffix << ", r_q" << var_suffix << ");\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  }
  // Close the point-batch loop, slice loop, or full-element scope opened above
  tab.pop();
  code << tab << "}\n";
  return CEED_ERROR_SUCCESS;
}
1116 
1117 //------------------------------------------------------------------------------
1118 // Build single operator kernel
1119 //------------------------------------------------------------------------------
1120 extern "C" int CeedOperatorBuildKernel_Cuda_gen(CeedOperator op, bool *is_good_build) {
1121   bool                    is_all_tensor = true, is_all_nontensor = true, is_at_points = false, use_3d_slices = false;
1122   Ceed                    ceed;
1123   CeedInt                 Q = 0, Q_1d = 0, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0, coords_comp_stride = 0;
1124   CeedQFunctionField     *qf_input_fields, *qf_output_fields;
1125   CeedQFunction_Cuda_gen *qf_data;
1126   CeedQFunction           qf;
1127   CeedOperatorField      *op_input_fields, *op_output_fields;
1128   CeedOperator_Cuda_gen  *data;
1129   std::ostringstream      code;
1130   Tab                     tab;
1131 
1132   CeedCallBackend(CeedOperatorGetData(op, &data));
1133   {
1134     bool is_setup_done;
1135 
1136     CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
1137     if (is_setup_done) {
1138       *is_good_build = !data->use_fallback;
1139       return CEED_ERROR_SUCCESS;
1140     }
1141   }
1142 
1143   // Check field compatibility
1144   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
1145   {
1146     bool has_shared_bases = true;
1147 
1148     for (CeedInt i = 0; i < num_input_fields; i++) {
1149       CeedBasis basis;
1150 
1151       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
1152       if (basis != CEED_BASIS_NONE) {
1153         bool        is_tensor = true;
1154         const char *resource;
1155         char       *resource_root;
1156         Ceed        basis_ceed;
1157 
1158         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
1159         is_all_tensor    = is_all_tensor && is_tensor;
1160         is_all_nontensor = is_all_nontensor && !is_tensor;
1161         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
1162         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
1163         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
1164         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
1165         CeedCallBackend(CeedFree(&resource_root));
1166         CeedCallBackend(CeedDestroy(&basis_ceed));
1167       }
1168       CeedCallBackend(CeedBasisDestroy(&basis));
1169     }
1170 
1171     for (CeedInt i = 0; i < num_output_fields; i++) {
1172       CeedBasis basis;
1173 
1174       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
1175       if (basis != CEED_BASIS_NONE) {
1176         bool        is_tensor = true;
1177         const char *resource;
1178         char       *resource_root;
1179         Ceed        basis_ceed;
1180 
1181         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
1182         is_all_tensor    = is_all_tensor && is_tensor;
1183         is_all_nontensor = is_all_nontensor && !is_tensor;
1184 
1185         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
1186         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
1187         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
1188         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
1189         CeedCallBackend(CeedFree(&resource_root));
1190         CeedCallBackend(CeedDestroy(&basis_ceed));
1191       }
1192       CeedCallBackend(CeedBasisDestroy(&basis));
1193     }
1194     // -- Fallback to ref if not all bases are shared
1195     if (!has_shared_bases) {
1196       *is_good_build = false;
1197       return CEED_ERROR_SUCCESS;
1198     }
1199   }
1200   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
1201   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
1202   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
1203   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
1204 
1205   // Get operator data
1206   CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
1207   {
1208     CeedInt max_P = 0, max_P_1d = 0;
1209 
1210     CeedCallBackend(CeedOperatorBuildKernelData_Cuda_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields,
1211                                                          op_output_fields, qf_output_fields, &max_P, &max_P_1d, &Q, &Q_1d, &max_dim, &is_all_tensor,
1212                                                          &use_3d_slices));
1213     data->max_P_1d = is_all_tensor ? max_P_1d : max_P;
1214   }
1215   if (max_dim == 0) max_dim = 1;
1216   data->dim = max_dim;
1217   if (is_at_points) {
1218     CeedElemRestriction_Cuda *rstr_data;
1219     CeedElemRestriction       rstr_points = NULL;
1220 
1221     CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
1222     CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
1223     CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
1224     CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data));
1225     data->points.indices = (CeedInt *)rstr_data->d_offsets;
1226     CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
1227   }
1228   if (is_at_points) use_3d_slices = false;
1229   if (Q_1d == 0) {
1230     if (is_at_points) Q_1d = max_num_points;
1231     else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d));
1232   }
1233   if (Q == 0) Q = Q_1d;
1234   data->Q    = Q;
1235   data->Q_1d = Q_1d;
1236 
1237   // Check for restriction only identity operator
1238   {
1239     bool is_identity_qf;
1240 
1241     CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
1242     if (is_identity_qf) {
1243       CeedEvalMode eval_mode_in, eval_mode_out;
1244 
1245       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
1246       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
1247       CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
1248                 "Backend does not implement restriction only identity operators");
1249     }
1250   }
1251 
1252   // Add atomicAdd function for old NVidia architectures
1253   {
1254     Ceed_Cuda            *ceed_data;
1255     struct cudaDeviceProp prop;
1256 
1257     CeedCallBackend(CeedGetData(ceed, &ceed_data));
1258     CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
1259     if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
1260       code << tab << "// AtomicAdd fallback source\n";
1261       code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n";
1262     }
1263   }
1264 
1265   // Load basis source files
1266   if (!is_all_nontensor) {
1267     code << tab << "// Tensor basis source\n";
1268     code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n";
1269   }
1270   if (!is_all_tensor) {
1271     code << tab << "// Non-tensor basis source\n";
1272     code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor-templates.h>\n\n";
1273   }
1274   if (!is_all_tensor && !is_all_nontensor) {
1275     code << "// Tensor basis source\n";
1276     code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-flattened-templates.h>\n\n";
1277   }
1278   if (is_at_points) {
1279     code << "// AtPoints basis source\n";
1280     code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points-templates.h>\n\n";
1281   }
1282   code << "// CodeGen operator source\n";
1283   code << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n";
1284 
1285   // Get QFunction name
1286   std::string qfunction_name(qf_data->qfunction_name);
1287   std::string operator_name;
1288 
1289   operator_name = "CeedKernelCudaGenOperator_" + qfunction_name;
1290 
1291   // Define CEED_Q_VLA
1292   code << "\n" << tab << "#undef CEED_Q_VLA\n";
1293   if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
1294     code << tab << "#define CEED_Q_VLA 1\n\n";
1295   } else {
1296     code << tab << "#define CEED_Q_VLA " << Q_1d << "\n\n";
1297   }
1298 
1299   // Add user QFunction source
1300   {
1301     const char *source_path;
1302 
1303     CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
1304     CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file");
1305 
1306     code << tab << "// User QFunction source\n";
1307     code << tab << "#include \"" << source_path << "\"\n\n";
1308   }
1309 
1310   // Setup
1311   code << "\n" << tab << "// -----------------------------------------------------------------------------\n";
1312   code << tab << "// Operator Kernel\n";
1313   code << tab << "// \n";
1314   code << tab << "// d_[in,out]_i:   CeedVector device array\n";
1315   code << tab << "// r_[in,out]_e_i: Element vector register\n";
1316   code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n";
1317   code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
1318   code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
1319   code << tab << "// \n";
1320   code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
1321   code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
1322   code << tab << "// -----------------------------------------------------------------------------\n";
1323   code << tab << "extern \"C\" __global__ void " << operator_name
1324        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda "
1325           "points) {\n";
1326   tab.push();
1327 
1328   // Scratch buffers
1329   for (CeedInt i = 0; i < num_input_fields; i++) {
1330     CeedEvalMode eval_mode;
1331 
1332     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
1333     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
1334       code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
1335     }
1336   }
1337   for (CeedInt i = 0; i < num_output_fields; i++) {
1338     code << tab << "CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
1339   }
1340 
1341   code << tab << "const CeedInt max_dim = " << max_dim << ";\n";
1342   if (!is_all_tensor) {
1343     code << tab << "const CeedInt Q = " << Q << ";\n";
1344   }
1345   if (!is_all_nontensor) {
1346     code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n";
1347   }
1348   if (is_at_points) {
1349     code << tab << "const CeedInt max_num_points = " << max_num_points << ";\n";
1350     code << tab << "const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
1351   }
1352 
1353   // Shared data
1354   code << tab << "extern __shared__ CeedScalar slice[];\n";
1355   code << tab << "SharedData_Cuda data;\n";
1356   code << tab << "data.t_id_x = threadIdx.x;\n";
1357   code << tab << "data.t_id_y = threadIdx.y;\n";
1358   code << tab << "data.t_id_z = threadIdx.z;\n";
1359   code << tab << "data.t_id   = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
1360   code << tab << "data.slice  = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n";
1361 
1362   // -- Determine input mat reuse
1363   FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX];
1364 
1365   for (CeedInt i = 0; i < num_input_fields; i++) {
1366     input_matrix_reuse[i].index = -1;
1367   }
1368   for (CeedInt i = 0; i < num_input_fields; i++) {
1369     bool         is_tensor = true;
1370     CeedEvalMode eval_mode_i;
1371     CeedBasis    basis_i;
1372 
1373     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
1374     if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
1375     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
1376     CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
1377     for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
1378       CeedEvalMode eval_mode_j;
1379       CeedBasis    basis_j;
1380 
1381       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
1382       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1383       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
1384       if (basis_i == basis_j) {
1385         if (is_tensor) {
1386           input_matrix_reuse[i].index     = j;
1387           input_matrix_reuse[i].is_input  = true;
1388           input_matrix_reuse[i].eval_mode = eval_mode_j;
1389         } else {
1390           // For non-tensor can only re-use with the same eval mode
1391           if (eval_mode_i == eval_mode_j) {
1392             input_matrix_reuse[i].index     = j;
1393             input_matrix_reuse[i].is_input  = true;
1394             input_matrix_reuse[i].eval_mode = eval_mode_j;
1395           }
1396         }
1397       }
1398       CeedCallBackend(CeedBasisDestroy(&basis_j));
1399     }
1400     CeedCallBackend(CeedBasisDestroy(&basis_i));
1401   }
1402 
1403   // -- Determine output mat reuse
1404   FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX];
1405 
1406   for (CeedInt i = 0; i < num_output_fields; i++) {
1407     output_matrix_reuse[i].index = -1;
1408   }
1409   for (CeedInt i = 0; i < num_output_fields; i++) {
1410     bool         is_tensor = true;
1411     CeedEvalMode eval_mode_i;
1412     CeedBasis    basis_i;
1413 
1414     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
1415     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
1416     CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
1417     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
1418       CeedEvalMode eval_mode_j;
1419       CeedBasis    basis_j;
1420 
1421       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
1422       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1423       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
1424       if (basis_i == basis_j) {
1425         if (is_tensor) {
1426           output_matrix_reuse[i].index     = j;
1427           output_matrix_reuse[i].is_input  = true;
1428           output_matrix_reuse[i].eval_mode = eval_mode_j;
1429         } else {
1430           // For non-tensor can only re-use with the same eval mode
1431           if (eval_mode_i == eval_mode_j) {
1432             output_matrix_reuse[i].index     = j;
1433             output_matrix_reuse[i].is_input  = true;
1434             output_matrix_reuse[i].eval_mode = eval_mode_j;
1435           }
1436         }
1437       }
1438       CeedCallBackend(CeedBasisDestroy(&basis_j));
1439     }
1440     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
1441       CeedEvalMode eval_mode_j;
1442       CeedBasis    basis_j;
1443 
1444       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
1445       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1446       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
1447       if (basis_i == basis_j) {
1448         if (is_tensor) {
1449           output_matrix_reuse[i].index     = j;
1450           output_matrix_reuse[i].is_input  = false;
1451           output_matrix_reuse[i].eval_mode = eval_mode_j;
1452         } else {
1453           // For non-tensor can only re-use with the same eval mode
1454           if (eval_mode_i == eval_mode_j) {
1455             output_matrix_reuse[i].index     = j;
1456             output_matrix_reuse[i].is_input  = false;
1457             output_matrix_reuse[i].eval_mode = eval_mode_j;
1458           }
1459         }
1460       }
1461       CeedCallBackend(CeedBasisDestroy(&basis_j));
1462     }
1463     CeedCallBackend(CeedBasisDestroy(&basis_i));
1464   }
1465 
1466   // Initialize constants, and matrices B and G
1467   code << "\n" << tab << "// Input field constants and basis data\n";
1468   for (CeedInt i = 0; i < num_input_fields; i++) {
1469     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i],
1470                                                               max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices, false));
1471   }
1472   code << "\n" << tab << "// Output field constants and basis data\n";
1473   for (CeedInt i = 0; i < num_output_fields; i++) {
1474     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i],
1475                                                               max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices, false));
1476   }
1477 
1478   // Loop over all elements
1479   code << "\n" << tab << "// Element loop\n";
1480   code << tab << "__syncthreads();\n";
1481   code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
1482   tab.push();
1483 
1484   // -- Compute minimum buffer space needed
1485   CeedInt max_rstr_buffer_size = 1;
1486 
1487   for (CeedInt i = 0; i < num_input_fields; i++) {
1488     CeedEvalMode eval_mode;
1489 
1490     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
1491     if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) {
1492       CeedInt             num_comp;
1493       CeedElemRestriction elem_rstr;
1494 
1495       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
1496       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
1497       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
1498       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
1499     }
1500   }
1501   for (CeedInt i = 0; i < num_output_fields; i++) {
1502     CeedEvalMode eval_mode;
1503 
1504     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
1505     if (eval_mode != CEED_EVAL_NONE) {
1506       CeedInt             num_comp;
1507       CeedElemRestriction elem_rstr;
1508 
1509       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
1510       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
1511       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
1512       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
1513     }
1514   }
1515   code << tab << "// Scratch restriction buffer space\n";
1516   code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";
1517 
1518   // -- Determine best input field processing order
1519   CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];
1520 
1521   for (CeedInt i = 0; i < num_input_fields; i++) {
1522     field_rstr_in_buffer[i] = -1;
1523     input_field_order[i]    = -1;
1524   }
1525   {
1526     bool    is_ordered[CEED_FIELD_MAX];
1527     CeedInt curr_index = 0;
1528 
1529     for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
1530     for (CeedInt i = 0; i < num_input_fields; i++) {
1531       CeedVector          vec_i;
1532       CeedElemRestriction rstr_i;
1533 
1534       if (is_ordered[i]) continue;
1535       field_rstr_in_buffer[i]       = i;
1536       is_ordered[i]                 = true;
1537       input_field_order[curr_index] = i;
1538       curr_index++;
1539       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
1540       if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
1541       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
1542       for (CeedInt j = i + 1; j < num_input_fields; j++) {
1543         CeedVector          vec_j;
1544         CeedElemRestriction rstr_j;
1545 
1546         CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
1547         CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
1548         if (rstr_i == rstr_j && vec_i == vec_j) {
1549           field_rstr_in_buffer[j]       = i;
1550           is_ordered[j]                 = true;
1551           input_field_order[curr_index] = j;
1552           curr_index++;
1553         }
1554         CeedCallBackend(CeedVectorDestroy(&vec_j));
1555         CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
1556       }
1557       CeedCallBackend(CeedVectorDestroy(&vec_i));
1558       CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
1559     }
1560   }
1561 
1562   // -- Input restriction and basis
1563   code << "\n" << tab << "// -- Input field restrictions and basis actions\n";
1564   for (CeedInt i = 0; i < num_input_fields; i++) {
1565     const char   *field_name;
1566     const CeedInt f = input_field_order[i];
1567 
1568     CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
1569     code << tab << "// ---- Input field " << f << ": " << field_name << "\n";
1570 
1571     // ---- Restriction
1572     CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f],
1573                                                                 max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));
1574 
1575     // ---- Basis action
1576     CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true,
1577                                                           is_all_tensor, is_at_points, use_3d_slices));
1578   }
1579 
1580   // -- Q function
1581   CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields,
1582                                                             qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name,
1583                                                             Q_1d, is_all_tensor, is_at_points, use_3d_slices));
1584 
1585   // -- Output basis and restriction
1586   code << "\n" << tab << "// -- Output field basis action and restrictions\n";
1587   for (CeedInt i = 0; i < num_output_fields; i++) {
1588     const char *field_name;
1589 
1590     CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
1591     code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
1592 
1593     // ---- Basis action
1594     CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false,
1595                                                           is_all_tensor, is_at_points, use_3d_slices));
1596 
1597     // ---- Restriction
1598     CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, i, NULL, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d,
1599                                                                 false, is_all_tensor, is_at_points, use_3d_slices));
1600   }
1601 
1602   // Close loop and function
1603   tab.pop();
1604   code << tab << "}\n";
1605   tab.pop();
1606   code << tab << "}\n";
1607   code << tab << "// -----------------------------------------------------------------------------\n\n";
1608 
1609   // Compile
1610   {
1611     bool          is_compile_good = false;
1612     const CeedInt T_1d            = CeedIntMax(is_all_tensor ? Q_1d : Q, data->max_P_1d);
1613 
1614     data->thread_1d = T_1d;
1615     CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, &data->module, 1, "OP_T_1D", T_1d));
1616     if (is_compile_good) {
1617       *is_good_build = true;
1618       CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, operator_name.c_str(), &data->op));
1619     } else {
1620       *is_good_build     = false;
1621       data->use_fallback = true;
1622     }
1623   }
1624   CeedCallBackend(CeedOperatorSetSetupDone(op));
1625   CeedCallBackend(CeedDestroy(&ceed));
1626   CeedCallBackend(CeedQFunctionDestroy(&qf));
1627   return CEED_ERROR_SUCCESS;
1628 }
1629 
1630 //------------------------------------------------------------------------------
1631 // Build AtPoints assembly operator kernel
1632 //------------------------------------------------------------------------------
1633 static int CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(CeedOperator op, bool is_full, bool *is_good_build) {
1634   bool                    is_all_tensor = true, is_at_points = false, use_3d_slices = false;
1635   Ceed                    ceed;
1636   CeedInt                 Q, Q_1d, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0, coords_comp_stride = 0;
1637   CeedQFunctionField     *qf_input_fields, *qf_output_fields;
1638   CeedQFunction_Cuda_gen *qf_data;
1639   CeedQFunction           qf;
1640   CeedOperatorField      *op_input_fields, *op_output_fields;
1641   CeedOperator_Cuda_gen  *data;
1642   std::ostringstream      code;
1643   Tab                     tab;
1644 
1645   // Check compatibility
1646   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
1647   CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
1648   CeedCheck(is_at_points, ceed, CEED_ERROR_BACKEND, "Only AtPoints operator assembly supported");
1649 
1650   // Retrieve operator data
1651   CeedCallBackend(CeedOperatorGetData(op, &data));
1652   Q       = data->Q;
1653   Q_1d    = data->Q_1d;
1654   max_dim = data->dim;
1655   {
1656     CeedElemRestriction rstr_points = NULL;
1657 
1658     CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
1659     CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
1660     CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
1661     CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
1662   }
1663   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
1664   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
1665   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
1666   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
1667 
1668   // Add atomicAdd function for old NVidia architectures
1669   {
1670     Ceed_Cuda            *ceed_data;
1671     struct cudaDeviceProp prop;
1672 
1673     CeedCallBackend(CeedGetData(ceed, &ceed_data));
1674     CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
1675     if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
1676       code << tab << "// AtomicAdd fallback source\n";
1677       code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n";
1678     }
1679   }
1680 
1681   // Load basis source files
1682   code << tab << "// Tensor basis source\n";
1683   code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n";
1684   code << tab << "// AtPoints basis source\n";
1685   code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points-templates.h>\n\n";
1686   code << tab << "// CodeGen operator source\n";
1687   code << tab << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n";
1688 
1689   // Get QFunction name
1690   std::string qfunction_name(qf_data->qfunction_name);
1691   std::string operator_name;
1692 
1693   if (is_full) {
1694     operator_name = "CeedKernelCudaGenOperatorFullAssembly_" + qfunction_name;
1695   } else {
1696     operator_name = "CeedKernelCudaGenOperatorDiagonalAssembly_" + qfunction_name;
1697   }
1698 
1699   // Define CEED_Q_VLA
1700   code << "\n" << tab << "#undef CEED_Q_VLA\n";
1701   code << tab << "#define CEED_Q_VLA 1\n\n";
1702 
1703   // Add user QFunction source
1704   {
1705     const char *source_path;
1706 
1707     CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
1708     CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file");
1709 
1710     code << tab << "// User QFunction source\n";
1711     code << tab << "#include \"" << source_path << "\"\n\n";
1712   }
1713 
1714   // Setup
1715   code << "\n" << tab << "// -----------------------------------------------------------------------------\n";
1716   code << tab << "// Operator Assembly Kernel\n";
1717   code << tab << "// \n";
1718   code << tab << "// d_[in,out]_i:   CeedVector device array\n";
1719   code << tab << "// r_[in,out]_e_i: Element vector register\n";
1720   code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n";
1721   code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
1722   code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
1723   code << tab << "// \n";
1724   code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
1725   code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
1726   code << tab << "// -----------------------------------------------------------------------------\n";
1727   code << tab << "extern \"C\" __global__ void " << operator_name
1728        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda "
1729           "points, CeedScalar *__restrict__ values_array) {\n";
1730   tab.push();
1731 
1732   // Scratch buffers
1733   for (CeedInt i = 0; i < num_input_fields; i++) {
1734     CeedEvalMode eval_mode;
1735 
1736     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
1737     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
1738       code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
1739     }
1740   }
1741   for (CeedInt i = 0; i < num_output_fields; i++) {
1742     code << tab << "CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
1743   }
1744 
1745   code << tab << "const CeedInt max_dim = " << max_dim << ";\n";
1746   code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n";
1747   code << tab << "const CeedInt max_num_points = " << max_num_points << ";\n";
1748   code << tab << "const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
1749 
1750   // Shared data
1751   code << tab << "extern __shared__ CeedScalar slice[];\n";
1752   code << tab << "SharedData_Cuda data;\n";
1753   code << tab << "data.t_id_x = threadIdx.x;\n";
1754   code << tab << "data.t_id_y = threadIdx.y;\n";
1755   code << tab << "data.t_id_z = threadIdx.z;\n";
1756   code << tab << "data.t_id   = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
1757   code << tab << "data.slice  = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n";
1758 
1759   // -- Determine input mat reuse
1760   FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX];
1761 
1762   for (CeedInt i = 0; i < num_input_fields; i++) {
1763     input_matrix_reuse[i].index = -1;
1764   }
1765   for (CeedInt i = 0; i < num_input_fields; i++) {
1766     CeedEvalMode eval_mode_i;
1767     CeedBasis    basis_i;
1768 
1769     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
1770     if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
1771     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
1772     for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
1773       CeedEvalMode eval_mode_j;
1774       CeedBasis    basis_j;
1775 
1776       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
1777       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1778       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
1779       if (basis_i == basis_j) {
1780         input_matrix_reuse[i].index     = j;
1781         input_matrix_reuse[i].is_input  = true;
1782         input_matrix_reuse[i].eval_mode = eval_mode_j;
1783       }
1784       CeedCallBackend(CeedBasisDestroy(&basis_j));
1785     }
1786     CeedCallBackend(CeedBasisDestroy(&basis_i));
1787   }
1788 
1789   // -- Determine output mat reuse
1790   FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX];
1791 
1792   for (CeedInt i = 0; i < num_output_fields; i++) {
1793     output_matrix_reuse[i].index = -1;
1794   }
1795   for (CeedInt i = 0; i < num_output_fields; i++) {
1796     CeedEvalMode eval_mode_i;
1797     CeedBasis    basis_i;
1798 
1799     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
1800     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
1801     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
1802       CeedEvalMode eval_mode_j;
1803       CeedBasis    basis_j;
1804 
1805       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
1806       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1807       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
1808       if (basis_i == basis_j) {
1809         output_matrix_reuse[i].index     = j;
1810         output_matrix_reuse[i].is_input  = true;
1811         output_matrix_reuse[i].eval_mode = eval_mode_j;
1812       }
1813       CeedCallBackend(CeedBasisDestroy(&basis_j));
1814     }
1815     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
1816       CeedEvalMode eval_mode_j;
1817       CeedBasis    basis_j;
1818 
1819       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
1820       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1821       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
1822       if (basis_i == basis_j) {
1823         output_matrix_reuse[i].index     = j;
1824         output_matrix_reuse[i].is_input  = false;
1825         output_matrix_reuse[i].eval_mode = eval_mode_j;
1826       }
1827       CeedCallBackend(CeedBasisDestroy(&basis_j));
1828     }
1829     CeedCallBackend(CeedBasisDestroy(&basis_i));
1830   }
1831 
1832   // Initialize constants, and matrices B and G
1833   code << "\n" << tab << "// Input field constants and basis data\n";
1834   for (CeedInt i = 0; i < num_input_fields; i++) {
1835     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i],
1836                                                               max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices, false));
1837   }
1838   code << "\n" << tab << "// Output field constants and basis data\n";
1839   for (CeedInt i = 0; i < num_output_fields; i++) {
1840     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i],
1841                                                               max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices, false));
1842   }
1843 
1844   // Loop over all elements
1845   code << "\n" << tab << "// Element loop\n";
1846   code << tab << "__syncthreads();\n";
1847   code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
1848   tab.push();
1849 
1850   // -- Compute minimum buffer space needed
1851   CeedInt max_rstr_buffer_size = 1;
1852 
1853   for (CeedInt i = 0; i < num_input_fields; i++) {
1854     CeedEvalMode eval_mode;
1855 
1856     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
1857     if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) {
1858       CeedInt             num_comp;
1859       CeedElemRestriction elem_rstr;
1860 
1861       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
1862       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
1863       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
1864       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
1865     }
1866   }
1867   for (CeedInt i = 0; i < num_output_fields; i++) {
1868     CeedEvalMode eval_mode;
1869 
1870     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
1871     if (eval_mode != CEED_EVAL_NONE) {
1872       CeedInt             num_comp;
1873       CeedElemRestriction elem_rstr;
1874 
1875       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
1876       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
1877       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
1878       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
1879     }
1880   }
1881   code << tab << "// Scratch restriction buffer space\n";
1882   code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";
1883 
1884   // -- Determine best input field processing order
1885   CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];
1886 
1887   for (CeedInt i = 0; i < num_input_fields; i++) {
1888     field_rstr_in_buffer[i] = -1;
1889     input_field_order[i]    = -1;
1890   }
1891   {
1892     bool    is_ordered[CEED_FIELD_MAX];
1893     CeedInt curr_index = 0;
1894 
1895     for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
1896     for (CeedInt i = 0; i < num_input_fields; i++) {
1897       CeedVector          vec_i;
1898       CeedElemRestriction rstr_i;
1899 
1900       if (is_ordered[i]) continue;
1901       field_rstr_in_buffer[i]       = i;
1902       is_ordered[i]                 = true;
1903       input_field_order[curr_index] = i;
1904       curr_index++;
1905       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
1906       if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
1907       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
1908       for (CeedInt j = i + 1; j < num_input_fields; j++) {
1909         CeedVector          vec_j;
1910         CeedElemRestriction rstr_j;
1911 
1912         CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
1913         CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
1914         if (rstr_i == rstr_j && vec_i == vec_j) {
1915           field_rstr_in_buffer[j]       = i;
1916           is_ordered[j]                 = true;
1917           input_field_order[curr_index] = j;
1918           curr_index++;
1919         }
1920         CeedCallBackend(CeedVectorDestroy(&vec_j));
1921         CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
1922       }
1923       CeedCallBackend(CeedVectorDestroy(&vec_i));
1924       CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
1925     }
1926   }
1927 
1928   // -- Input restriction and basis
1929   code << "\n" << tab << "// -- Input field restrictions and basis actions\n";
1930   CeedInt active_field_index = -1;
1931 
1932   for (CeedInt i = 0; i < num_input_fields; i++) {
1933     bool          is_active = false;
1934     const char   *field_name;
1935     const CeedInt f = input_field_order[i];
1936 
1937     {
1938       CeedVector vec;
1939 
1940       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec));
1941       is_active = vec == CEED_VECTOR_ACTIVE;
1942       CeedCallBackend(CeedVectorDestroy(&vec));
1943     }
1944 
1945     CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
1946     code << tab << "// ---- Input field " << f << ": " << field_name << "\n";
1947 
1948     if (is_active) {
1949       std::string var_suffix = "_in_" + std::to_string(f);
1950 
1951       code << tab << "// Active field - no restriction or basis action here\n";
1952       if (active_field_index == -1) {
1953         active_field_index = f;
1954         code << tab << "CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? "P_1d" + var_suffix : "1")
1955              << "] = {0.0};\n";
1956       } else {
1957         code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_in_" << active_field_index << ";\n";
1958       }
1959     } else {
1960       // ---- Restriction
1961       CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f],
1962                                                                   max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));
1963 
1964       // ---- Basis action
1965       CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true,
1966                                                             is_all_tensor, is_at_points, use_3d_slices));
1967     }
1968   }
1969 
1970   // -- Loop over active field
1971   std::string active_var_suffix = "_in_" + std::to_string(active_field_index);
1972 
1973   code << "\n" << tab << "// Loop over nodes in active field\n";
1974   code << tab << "for (CeedInt n = 0; n < num_comp" << active_var_suffix << "*P_1d" << active_var_suffix
1975        << (max_dim > 1 ? "*P_1d" + active_var_suffix : "") << (max_dim > 2 ? "*P_1d" + active_var_suffix : "") << "; n++) {\n";
1976   tab.push();
1977 
1978   // -- Set current active node and component to 1
1979   code << tab << "// Set current active node and component to 1.0\n";
1980   code << tab << "SetEVecStandard" << max_dim << "d_Single<num_comp" << active_var_suffix << ", P_1d" << active_var_suffix << ">(data, n, 1.0, r_e"
1981        << active_var_suffix << ");\n\n";
1982 
1983   for (CeedInt i = 0; i < num_input_fields; i++) {
1984     bool          is_active = false;
1985     const char   *field_name;
1986     const CeedInt f = input_field_order[i];
1987 
1988     {
1989       CeedVector vec;
1990 
1991       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec));
1992       is_active = vec == CEED_VECTOR_ACTIVE;
1993       CeedCallBackend(CeedVectorDestroy(&vec));
1994     }
1995     if (!is_active) continue;
1996 
1997     CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
1998     code << tab << "// ---- Input field " << f << ": " << field_name << "\n";
1999 
2000     // ---- Basis action
2001     CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true,
2002                                                           is_all_tensor, is_at_points, use_3d_slices));
2003   }
2004 
2005   // -- Q function
2006   CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields,
2007                                                             qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name,
2008                                                             Q_1d, is_all_tensor, is_at_points, use_3d_slices));
2009 
2010   // -- Output basis and restriction
2011   code << "\n" << tab << "// -- Output field basis action and restrictions\n";
2012   for (CeedInt i = 0; i < num_output_fields; i++) {
2013     bool        is_active = false;
2014     const char *field_name;
2015 
2016     {
2017       CeedVector vec;
2018 
2019       CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
2020       is_active = vec == CEED_VECTOR_ACTIVE;
2021       CeedCallBackend(CeedVectorDestroy(&vec));
2022     }
2023     if (!is_active) continue;
2024 
2025     CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
2026     code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
2027 
2028     // ---- Basis action
2029     CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false,
2030                                                           is_all_tensor, is_at_points, use_3d_slices));
2031 
2032     // ---- Restriction
2033     if (is_full) {
2034       std::string         var_suffix = "_out_" + std::to_string(i);
2035       CeedInt             comp_stride;
2036       CeedSize            l_size;
2037       CeedElemRestriction elem_rstr;
2038 
2039       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
2040       CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
2041       code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
2042       CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
2043       code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
2044       code << tab << "WriteLVecStandard" << max_dim << "d_Assembly<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", P_1d" + var_suffix
2045            << ">(data, l_size" << var_suffix << ", elem, n, r_e" << var_suffix << ", values_array);\n";
2046       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
2047     } else {
2048       std::string         var_suffix = "_out_" + std::to_string(i);
2049       CeedInt             comp_stride;
2050       CeedSize            l_size;
2051       CeedElemRestriction elem_rstr;
2052 
2053       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
2054       CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
2055       code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
2056       CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
2057       code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
2058       code << tab << "WriteLVecStandard" << max_dim << "d_Single<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", P_1d" + var_suffix
2059            << ">(data, l_size" << var_suffix << ", elem, n, indices.outputs[" << i << "], r_e" << var_suffix << ", values_array);\n";
2060       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
2061     }
2062   }
2063 
2064   // -- Reset current active node and component
2065   code << "\n" << tab << "// Reset current active node and component to 0.0\n";
2066   code << tab << "SetEVecStandard" << max_dim << "d_Single<num_comp" << active_var_suffix << ", P_1d" << active_var_suffix << ">(data, n, 0.0, r_e"
2067        << active_var_suffix << ");\n";
2068 
2069   // -- End of loop over active field
2070   tab.pop();
2071   code << tab << "}\n";
2072 
2073   // Close loop and function
2074   tab.pop();
2075   code << tab << "}\n";
2076   tab.pop();
2077   code << tab << "}\n";
2078   code << tab << "// -----------------------------------------------------------------------------\n\n";
2079 
2080   // Compile
2081   {
2082     bool          is_compile_good = false;
2083     const CeedInt T_1d            = CeedIntMax(is_all_tensor ? Q_1d : Q, data->max_P_1d);
2084 
2085     data->thread_1d = T_1d;
2086     CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good,
2087                                         is_full ? &data->module_assemble_full : &data->module_assemble_diagonal, 1, "OP_T_1D", T_1d));
2088     if (is_compile_good) {
2089       *is_good_build = true;
2090       CeedCallBackend(CeedGetKernel_Cuda(ceed, is_full ? data->module_assemble_full : data->module_assemble_diagonal, operator_name.c_str(),
2091                                          is_full ? &data->assemble_full : &data->assemble_diagonal));
2092     } else {
2093       *is_good_build              = false;
2094       data->use_assembly_fallback = true;
2095     }
2096   }
2097   CeedCallBackend(CeedDestroy(&ceed));
2098   CeedCallBackend(CeedQFunctionDestroy(&qf));
2099   return CEED_ERROR_SUCCESS;
2100 }
2101 
2102 extern "C" int CeedOperatorBuildKernelDiagonalAssemblyAtPoints_Cuda_gen(CeedOperator op, bool *is_good_build) {
2103   return CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(op, false, is_good_build);
2104 }
2105 
2106 extern "C" int CeedOperatorBuildKernelFullAssemblyAtPoints_Cuda_gen(CeedOperator op, bool *is_good_build) {
2107   return CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(op, true, is_good_build);
2108 }
2109 
2110 //------------------------------------------------------------------------------
2111 // Build QFunction assembly operator kernel
2112 //------------------------------------------------------------------------------
2113 extern "C" int CeedOperatorBuildKernelLinearAssembleQFunction_Cuda_gen(CeedOperator op, bool *is_good_build) {
2114   bool                    is_all_tensor = true, is_all_nontensor = true, is_at_points = false, use_3d_slices = false;
2115   Ceed                    ceed;
2116   CeedInt                 Q, Q_1d, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0;
2117   CeedQFunctionField     *qf_input_fields, *qf_output_fields;
2118   CeedQFunction_Cuda_gen *qf_data;
2119   CeedQFunction           qf;
2120   CeedOperatorField      *op_input_fields, *op_output_fields;
2121   CeedOperator_Cuda_gen  *data;
2122   std::ostringstream      code;
2123   Tab                     tab;
2124 
2125   // Check compatibility
2126   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
2127   CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
2128   CeedCheck(!is_at_points, ceed, CEED_ERROR_BACKEND, "AtPoints QFunction assembly is not supported");
2129 
2130   // Check field compatibility
2131   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
2132   {
2133     bool has_shared_bases = true;
2134 
2135     for (CeedInt i = 0; i < num_input_fields; i++) {
2136       CeedBasis basis;
2137 
2138       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
2139       if (basis != CEED_BASIS_NONE) {
2140         bool        is_tensor = true;
2141         const char *resource;
2142         char       *resource_root;
2143         Ceed        basis_ceed;
2144 
2145         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
2146         is_all_tensor    = is_all_tensor && is_tensor;
2147         is_all_nontensor = is_all_nontensor && !is_tensor;
2148         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
2149         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
2150         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
2151         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
2152         CeedCallBackend(CeedFree(&resource_root));
2153         CeedCallBackend(CeedDestroy(&basis_ceed));
2154       }
2155       CeedCallBackend(CeedBasisDestroy(&basis));
2156     }
2157 
2158     for (CeedInt i = 0; i < num_output_fields; i++) {
2159       CeedBasis basis;
2160 
2161       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
2162       if (basis != CEED_BASIS_NONE) {
2163         bool        is_tensor = true;
2164         const char *resource;
2165         char       *resource_root;
2166         Ceed        basis_ceed;
2167 
2168         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
2169         is_all_tensor    = is_all_tensor && is_tensor;
2170         is_all_nontensor = is_all_nontensor && !is_tensor;
2171 
2172         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
2173         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
2174         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
2175         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
2176         CeedCallBackend(CeedFree(&resource_root));
2177         CeedCallBackend(CeedDestroy(&basis_ceed));
2178       }
2179       CeedCallBackend(CeedBasisDestroy(&basis));
2180     }
2181   }
2182 
2183   // Retrieve operator data
2184   CeedCallBackend(CeedOperatorGetData(op, &data));
2185   Q       = data->Q;
2186   Q_1d    = data->Q_1d;
2187   max_dim = data->dim;
2188   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
2189   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
2190   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
2191 
2192   // Add atomicAdd function for old NVidia architectures
2193   {
2194     Ceed_Cuda            *ceed_data;
2195     struct cudaDeviceProp prop;
2196 
2197     CeedCallBackend(CeedGetData(ceed, &ceed_data));
2198     CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
2199     if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
2200       code << tab << "// AtomicAdd fallback source\n";
2201       code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n";
2202     }
2203   }
2204 
2205   // Load basis source files
2206   if (!is_all_nontensor) {
2207     code << tab << "// Tensor basis source\n";
2208     code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n";
2209   }
2210   if (!is_all_tensor) {
2211     code << tab << "// Non-tensor basis source\n";
2212     code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor-templates.h>\n\n";
2213   }
2214   if (!is_all_tensor && !is_all_nontensor) {
2215     code << "// Tensor basis source\n";
2216     code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-flattened-templates.h>\n\n";
2217   }
2218   code << "// CodeGen operator source\n";
2219   code << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n";
2220 
2221   // Get QFunction name
2222   std::string qfunction_name(qf_data->qfunction_name);
2223   std::string operator_name;
2224 
2225   operator_name = "CeedKernelCudaGenQFunctionAssembly_" + qfunction_name;
2226 
2227   // Define CEED_Q_VLA
2228   code << "\n" << tab << "#undef CEED_Q_VLA\n";
2229   if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
2230     code << tab << "#define CEED_Q_VLA 1\n\n";
2231   } else {
2232     code << tab << "#define CEED_Q_VLA " << Q_1d << "\n\n";
2233   }
2234 
2235   // Add user QFunction source
2236   {
2237     const char *source_path;
2238 
2239     CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
2240     CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file");
2241 
2242     code << tab << "// User QFunction source\n";
2243     code << tab << "#include \"" << source_path << "\"\n\n";
2244   }
2245 
2246   // Setup
2247   code << "\n" << tab << "// -----------------------------------------------------------------------------\n";
2248   code << tab << "// Operator Assembly Kernel\n";
2249   code << tab << "// \n";
2250   code << tab << "// d_[in,out]_i:   CeedVector device array\n";
2251   code << tab << "// r_[in,out]_e_i: Element vector register\n";
2252   code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n";
2253   code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
2254   code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
2255   code << tab << "// \n";
2256   code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
2257   code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
2258   code << tab << "// -----------------------------------------------------------------------------\n";
2259   code << tab << "extern \"C\" __global__ void " << operator_name
2260        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda "
2261           "points, CeedScalar *__restrict__ values_array) {\n";
2262   tab.push();
2263 
2264   // Scratch buffers
2265   for (CeedInt i = 0; i < num_input_fields; i++) {
2266     CeedEvalMode eval_mode;
2267 
2268     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
2269     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
2270       code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
2271     }
2272   }
2273   for (CeedInt i = 0; i < num_output_fields; i++) {
2274     code << tab << "CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
2275   }
2276 
2277   code << tab << "const CeedInt max_dim = " << max_dim << ";\n";
2278   if (!is_all_tensor) {
2279     code << tab << "const CeedInt Q = " << Q << ";\n";
2280   }
2281   if (!is_all_nontensor) {
2282     code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n";
2283   }
2284 
2285   // Shared data
2286   code << tab << "extern __shared__ CeedScalar slice[];\n";
2287   code << tab << "SharedData_Cuda data;\n";
2288   code << tab << "data.t_id_x = threadIdx.x;\n";
2289   code << tab << "data.t_id_y = threadIdx.y;\n";
2290   code << tab << "data.t_id_z = threadIdx.z;\n";
2291   code << tab << "data.t_id   = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
2292   code << tab << "data.slice  = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n";
2293 
2294   // -- Determine input mat reuse
2295   FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX];
2296 
2297   for (CeedInt i = 0; i < num_input_fields; i++) {
2298     input_matrix_reuse[i].index = -1;
2299   }
2300   for (CeedInt i = 0; i < num_input_fields; i++) {
2301     bool         is_tensor = true;
2302     CeedEvalMode eval_mode_i;
2303     CeedBasis    basis_i;
2304 
2305     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
2306     if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
2307     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
2308     CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
2309     for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
2310       CeedEvalMode eval_mode_j;
2311       CeedBasis    basis_j;
2312 
2313       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
2314       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
2315       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
2316       if (basis_i == basis_j) {
2317         if (is_tensor) {
2318           input_matrix_reuse[i].index     = j;
2319           input_matrix_reuse[i].is_input  = true;
2320           input_matrix_reuse[i].eval_mode = eval_mode_j;
2321         } else {
2322           // For non-tensor can only re-use with the same eval mode
2323           if (eval_mode_i == eval_mode_j) {
2324             input_matrix_reuse[i].index     = j;
2325             input_matrix_reuse[i].is_input  = true;
2326             input_matrix_reuse[i].eval_mode = eval_mode_j;
2327           }
2328         }
2329       }
2330       CeedCallBackend(CeedBasisDestroy(&basis_j));
2331     }
2332     CeedCallBackend(CeedBasisDestroy(&basis_i));
2333   }
2334 
2335   // -- Determine output mat reuse
2336   FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX];
2337 
2338   for (CeedInt i = 0; i < num_output_fields; i++) {
2339     output_matrix_reuse[i].index = -1;
2340   }
2341   for (CeedInt i = 0; i < num_output_fields; i++) {
2342     bool         is_tensor = true;
2343     CeedEvalMode eval_mode_i;
2344     CeedBasis    basis_i;
2345 
2346     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
2347     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
2348     CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
2349     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
2350       CeedEvalMode eval_mode_j;
2351       CeedBasis    basis_j;
2352 
2353       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
2354       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
2355       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
2356       if (basis_i == basis_j) {
2357         if (is_tensor) {
2358           output_matrix_reuse[i].index     = j;
2359           output_matrix_reuse[i].is_input  = true;
2360           output_matrix_reuse[i].eval_mode = eval_mode_j;
2361         } else {
2362           // For non-tensor can only re-use with the same eval mode
2363           if (eval_mode_i == eval_mode_j) {
2364             output_matrix_reuse[i].index     = j;
2365             output_matrix_reuse[i].is_input  = true;
2366             output_matrix_reuse[i].eval_mode = eval_mode_j;
2367           }
2368         }
2369       }
2370       CeedCallBackend(CeedBasisDestroy(&basis_j));
2371     }
2372     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
2373       CeedEvalMode eval_mode_j;
2374       CeedBasis    basis_j;
2375 
2376       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
2377       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
2378       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
2379       if (basis_i == basis_j) {
2380         if (is_tensor) {
2381           output_matrix_reuse[i].index     = j;
2382           output_matrix_reuse[i].is_input  = false;
2383           output_matrix_reuse[i].eval_mode = eval_mode_j;
2384         } else {
2385           // For non-tensor can only re-use with the same eval mode
2386           if (eval_mode_i == eval_mode_j) {
2387             output_matrix_reuse[i].index     = j;
2388             output_matrix_reuse[i].is_input  = false;
2389             output_matrix_reuse[i].eval_mode = eval_mode_j;
2390           }
2391         }
2392       }
2393       CeedCallBackend(CeedBasisDestroy(&basis_j));
2394     }
2395     CeedCallBackend(CeedBasisDestroy(&basis_i));
2396   }
2397 
2398   // Initialize constants, and matrices B and G
2399   code << "\n" << tab << "// Input field constants and basis data\n";
2400   for (CeedInt i = 0; i < num_input_fields; i++) {
2401     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i],
2402                                                               max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices, true));
2403   }
2404   code << "\n" << tab << "// Output field constants and basis data\n";
2405   for (CeedInt i = 0; i < num_output_fields; i++) {
2406     CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i],
2407                                                               max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices, true));
2408   }
2409 
2410   // Loop over all elements
2411   code << "\n" << tab << "// Element loop\n";
2412   code << tab << "__syncthreads();\n";
2413   code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
2414   tab.push();
2415 
2416   // -- Compute minimum buffer space needed
2417   CeedInt max_rstr_buffer_size = 1;
2418 
2419   for (CeedInt i = 0; i < num_input_fields; i++) {
2420     CeedEvalMode eval_mode;
2421 
2422     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
2423     if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) {
2424       CeedInt             num_comp;
2425       CeedElemRestriction elem_rstr;
2426 
2427       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
2428       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
2429       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
2430       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
2431     }
2432   }
2433   for (CeedInt i = 0; i < num_output_fields; i++) {
2434     CeedEvalMode eval_mode;
2435 
2436     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
2437     if (eval_mode != CEED_EVAL_NONE) {
2438       CeedInt             num_comp;
2439       CeedElemRestriction elem_rstr;
2440 
2441       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
2442       CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
2443       max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
2444       CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
2445     }
2446   }
2447   code << tab << "// Scratch restriction buffer space\n";
2448   code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";
2449 
2450   // -- Determine best input field processing order
2451   CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];
2452 
2453   for (CeedInt i = 0; i < num_input_fields; i++) {
2454     field_rstr_in_buffer[i] = -1;
2455     input_field_order[i]    = -1;
2456   }
2457   {
2458     bool    is_ordered[CEED_FIELD_MAX];
2459     CeedInt curr_index = 0;
2460 
2461     for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
2462     for (CeedInt i = 0; i < num_input_fields; i++) {
2463       CeedVector          vec_i;
2464       CeedElemRestriction rstr_i;
2465 
2466       if (is_ordered[i]) continue;
2467       field_rstr_in_buffer[i]       = i;
2468       is_ordered[i]                 = true;
2469       input_field_order[curr_index] = i;
2470       curr_index++;
2471       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
2472       if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
2473       CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
2474       for (CeedInt j = i + 1; j < num_input_fields; j++) {
2475         CeedVector          vec_j;
2476         CeedElemRestriction rstr_j;
2477 
2478         CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
2479         CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
2480         if (rstr_i == rstr_j && vec_i == vec_j) {
2481           field_rstr_in_buffer[j]       = i;
2482           is_ordered[j]                 = true;
2483           input_field_order[curr_index] = j;
2484           curr_index++;
2485         }
2486         CeedCallBackend(CeedVectorDestroy(&vec_j));
2487         CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
2488       }
2489       CeedCallBackend(CeedVectorDestroy(&vec_i));
2490       CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
2491     }
2492   }
2493 
2494   // -- Input restriction and basis
2495   code << "\n" << tab << "// -- Input field restrictions and basis actions\n";
2496   CeedInt num_active_in = 0, num_active_out = 0, qf_assembly_size_out = 0;
2497   CeedInt active_fields_in[CEED_FIELD_MAX], active_fields_out[CEED_FIELD_MAX];
2498 
2499   for (CeedInt i = 0; i < num_input_fields; i++) {
2500     bool          is_active = false;
2501     const char   *field_name;
2502     const CeedInt f = input_field_order[i];
2503 
2504     {
2505       CeedVector vec;
2506 
2507       CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec));
2508       is_active = vec == CEED_VECTOR_ACTIVE;
2509       CeedCallBackend(CeedVectorDestroy(&vec));
2510     }
2511 
2512     CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
2513     code << tab << "// ---- Input field " << f << ": " << field_name << "\n";
2514 
2515     if (is_active) {
2516       CeedEvalMode eval_mode;
2517       CeedInt      field_size;
2518 
2519       active_fields_in[num_active_in] = f;
2520       num_active_in++;
2521       CeedCallBackend(CeedQFunctionFieldGetSize(qf_input_fields[f], &field_size));
2522       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[f], &eval_mode));
2523       if (eval_mode == CEED_EVAL_GRAD) {
2524         code << tab << "CeedScalar r_q_in_" << f << "[num_comp_in_" << f << "*" << "dim_in_" << f << "*"
2525              << (is_all_tensor && (max_dim >= 3) ? "Q_1d" : "1") << "] = {0.};\n";
2526       } else {
2527         code << tab << "CeedScalar r_q_in_" << f << "[num_comp_in_" << f << "*" << (is_all_tensor && (max_dim >= 3) ? "Q_1d" : "1") << "] = {0.};\n";
2528       }
2529       code << tab << "const CeedInt field_size_in_" << f << " = " << field_size << ";\n";
2530     } else {
2531       // ---- Restriction
2532       CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f],
2533                                                                   max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));
2534 
2535       // ---- Basis action
2536       CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true,
2537                                                             is_all_tensor, is_at_points, use_3d_slices));
2538     }
2539   }
2540   code << tab << "const CeedInt field_sizes_in[" << num_active_in << "] = {";
2541   for (CeedInt i = 0; i < num_active_in; i++) {
2542     code << "field_size_in_" << active_fields_in[i] << (i < num_active_in - 1 ? ", " : "");
2543   }
2544   code << "};\n";
2545   code << tab << "CeedScalar * r_q_in[" << num_active_in << "] = {";
2546   for (CeedInt i = 0; i < num_active_in; i++) {
2547     code << "r_q_in_" << active_fields_in[i] << (i < num_active_in - 1 ? ", " : "");
2548   }
2549   code << "};\n";
2550 
2551   for (CeedInt i = 0; i < num_output_fields; i++) {
2552     bool is_active = false;
2553 
2554     {
2555       CeedVector vec;
2556 
2557       CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
2558       is_active = vec == CEED_VECTOR_ACTIVE;
2559       CeedCallBackend(CeedVectorDestroy(&vec));
2560     }
2561     if (is_active) {
2562       const char *field_name;
2563       CeedInt     field_size;
2564 
2565       active_fields_out[num_active_out] = i;
2566       num_active_out++;
2567       CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &field_size));
2568       qf_assembly_size_out += field_size;
2569       CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
2570       code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
2571       code << tab << "const CeedInt field_size_out_" << i << " = " << field_size << ";\n";
2572     }
2573   }
2574   code << tab << "const CeedInt field_sizes_out[" << num_active_out << "] = {";
2575   for (CeedInt i = 0; i < num_active_out; i++) {
2576     code << "field_size_out_" << active_fields_out[i] << (i < num_active_out - 1 ? ", " : "");
2577   }
2578   code << "};\n";
2579   code << tab << "const CeedInt total_size_out = " << qf_assembly_size_out << ";\n";
2580 
2581   // -- Loop over active field
2582   code << "\n" << tab << "CeedInt input_offset = 0;\n";
2583   code << tab << "// Loop over active QFunction input fields\n";
2584   code << tab << "const CeedInt num_active_in = " << num_active_in << ";\n";
2585   code << tab << "for (CeedInt a = 0; a < num_active_in; a++) {\n";
2586   tab.push();
2587 
2588   // -- Loop over size of active field
2589   code << "\n" << tab << "// Loop over current active input field size\n";
2590   code << tab << "const CeedInt field_size_in = field_sizes_in[a];\n";
2591   code << tab << "for (CeedInt s = 0; s < field_size_in; s++) {\n";
2592   tab.push();
2593 
2594   // -- Set current active point and component to 1
2595   code << tab << "// Set current active point and component to 1.0\n";
2596   if (is_all_tensor && (max_dim >= 3)) {
2597     code << tab << "for (CeedInt i = 0; i < Q_1d; i++) r_q_in[a][i + s * Q_1d] = 1.0;\n";
2598   } else {
2599     code << tab << "r_q_in[a][s] = 1.0;\n";
2600   }
2601 
2602   // -- Q function
2603   CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields,
2604                                                             qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name,
2605                                                             Q_1d, is_all_tensor, is_at_points, use_3d_slices));
2606 
2607   // -- Output basis and restriction
2608   code << "\n" << tab << "// -- Output field basis action and restrictions\n";
2609   CeedScalar offset = 0;
2610 
2611   for (CeedInt i = 0; i < num_output_fields; i++) {
2612     bool        is_active = false;
2613     const char *field_name;
2614 
2615     {
2616       CeedVector vec;
2617 
2618       CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
2619       is_active = vec == CEED_VECTOR_ACTIVE;
2620       CeedCallBackend(CeedVectorDestroy(&vec));
2621     }
2622     if (!is_active) continue;
2623 
2624     CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
2625     code << tab << "// ---- Output field " << i << ": " << field_name << "\n";
2626 
2627     // ---- Restriction
2628     CeedInt field_size;
2629 
2630     code << tab << "WriteLVecStandard" << (is_all_tensor ? max_dim : 1) << "d_QFAssembly<total_size_out, field_size_out_" << i << ", "
2631          << (is_all_tensor ? "Q_1d" : "Q") << ">(data, num_elem, elem, input_offset + s, " << offset << ", r_q_out_" << i << ", values_array);\n";
2632     CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &field_size));
2633     offset += field_size;
2634   }
2635 
2636   // -- Reset current active node and component
2637   code << "\n" << tab << "// Reset current active node and component to 0.0\n";
2638   if (is_all_tensor && (max_dim >= 3)) {
2639     code << tab << "for (CeedInt i = 0; i < Q_1d; i++) r_q_in[a][i + s * Q_1d] = 0.0;\n";
2640   } else {
2641     code << tab << "r_q_in[a][s] = 0.0;\n";
2642   }
2643 
2644   // -- End of loop over size of active field
2645   tab.pop();
2646   code << tab << "}\n";
2647   code << tab << "input_offset += field_size_in;\n";
2648 
2649   // -- End of loop over active field
2650   tab.pop();
2651   code << tab << "}\n";
2652 
2653   // Close loop and function
2654   tab.pop();
2655   code << tab << "}\n";
2656   tab.pop();
2657   code << tab << "}\n";
2658   code << tab << "// -----------------------------------------------------------------------------\n\n";
2659 
2660   // Compile
2661   {
2662     bool          is_compile_good = false;
2663     const CeedInt T_1d            = CeedIntMax(is_all_tensor ? Q_1d : Q, data->max_P_1d);
2664 
2665     data->thread_1d = T_1d;
2666     CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, &data->module_assemble_qfunction, 1, "OP_T_1D", T_1d));
2667     if (is_compile_good) {
2668       *is_good_build = true;
2669       CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module_assemble_qfunction, operator_name.c_str(), &data->assemble_qfunction));
2670     } else {
2671       *is_good_build              = false;
2672       data->use_assembly_fallback = true;
2673     }
2674   }
2675   CeedCallBackend(CeedDestroy(&ceed));
2676   CeedCallBackend(CeedQFunctionDestroy(&qf));
2677   return CEED_ERROR_SUCCESS;
2678 }
2679 
2680 //------------------------------------------------------------------------------
2681