xref: /libCEED/backends/cuda-gen/ceed-cuda-gen-operator-build.cpp (revision b2e3f8ecbfa285d0f4ffde9b24c57cc13f0319fb)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #define CEED_DEBUG_COLOR 12
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-source/cuda/cuda-types.h>
13 #include <ceed/jit-tools.h>
14 #include <cuda_runtime.h>
15 
16 #include <iostream>
17 #include <sstream>
18 
19 #include "../cuda-ref/ceed-cuda-ref.h"
20 #include "../cuda-shared/ceed-cuda-shared.h"
21 #include "../cuda/ceed-cuda-common.h"
22 #include "../cuda/ceed-cuda-compile.h"
23 #include "ceed-cuda-gen.h"
24 
25 //------------------------------------------------------------------------------
26 // Build single operator kernel
27 //------------------------------------------------------------------------------
28 extern "C" int CeedCudaGenOperatorBuild(CeedOperator op) {
29   using std::ostringstream;
30   using std::string;
31   bool is_setup_done;
32   CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
33   if (is_setup_done) return CEED_ERROR_SUCCESS;
34   Ceed ceed;
35   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
36   CeedOperator_Cuda_gen *data;
37   CeedCallBackend(CeedOperatorGetData(op, &data));
38   CeedQFunction           qf;
39   CeedQFunction_Cuda_gen *qf_data;
40   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
41   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
42   CeedSize lsize;
43   CeedInt  Q, P_1d = 0, Q_1d = 0, elem_size, num_input_fields, num_output_fields, num_comp, dim = 1;
44   CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
45   Q_1d = Q;
46   CeedOperatorField *op_input_fields, *op_output_fields;
47   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
48   CeedQFunctionField *qf_input_fields, *qf_output_fields;
49   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
50   CeedEvalMode              eval_mode;
51   CeedBasis                 basis;
52   CeedBasis_Cuda_shared    *basis_data;
53   CeedElemRestriction       Erestrict;
54   CeedElemRestriction_Cuda *restr_data;
55 
56   // TODO: put in a function?
57   // Check for restriction only identity operator
58   bool is_identity_qf;
59   CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
60   if (is_identity_qf) {
61     CeedEvalMode eval_mode_in, eval_mode_out;
62     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
63     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
64     if (eval_mode_in == CEED_EVAL_NONE && eval_mode_out == CEED_EVAL_NONE)
65       // LCOV_EXCL_START
66       return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement restriction only identity operators");
67     // LCOV_EXCL_STOP
68   }
69 
70   ostringstream code;
71 
72   // TODO: put in a function?
73   // Add atomicAdd function for old NVidia architectures
74   struct cudaDeviceProp prop;
75   Ceed_Cuda            *ceed_data;
76   CeedCallBackend(CeedGetData(ceed, &ceed_data));
77   CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
78   if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
79     char *atomic_add_path, *atomic_add_source;
80     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-atomic-add-fallback.h", &atomic_add_path));
81     CeedDebug256(ceed, 2, "----- Loading Atomic Add Source -----\n");
82     CeedCallBackend(CeedLoadSourceToBuffer(ceed, atomic_add_path, &atomic_add_source));
83     code << atomic_add_source;
84     CeedCallBackend(CeedFree(&atomic_add_path));
85     CeedCallBackend(CeedFree(&atomic_add_source));
86   }
87 
88   // Load basis source files
89   // TODO: generalize to accept different device functions?
90   {
91     char *tensor_basis_kernel_path, *tensor_basis_kernel_source;
92     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h", &tensor_basis_kernel_path));
93     CeedDebug256(ceed, 2, "----- Loading Tensor Basis Kernel Source -----\n");
94     CeedCallBackend(CeedLoadSourceToBuffer(ceed, tensor_basis_kernel_path, &tensor_basis_kernel_source));
95     code << tensor_basis_kernel_source;
96     CeedCallBackend(CeedFree(&tensor_basis_kernel_path));
97     CeedCallBackend(CeedFree(&tensor_basis_kernel_source));
98   }
99   {
100     char *cuda_gen_template_path, *cuda_gen_template_source;
101     CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-gen-templates.h", &cuda_gen_template_path));
102     CeedDebug256(ceed, 2, "----- Loading Cuda-Gen Template Source -----\n");
103     CeedCallBackend(CeedLoadSourceToBuffer(ceed, cuda_gen_template_path, &cuda_gen_template_source));
104     code << cuda_gen_template_source;
105     CeedCallBackend(CeedFree(&cuda_gen_template_path));
106     CeedCallBackend(CeedFree(&cuda_gen_template_source));
107   }
108 
109   // Get QFunction source and name
110   string q_function_source(qf_data->q_function_source);
111   string q_function_name(qf_data->q_function_name);
112   string operator_name;
113   operator_name = "CeedKernelCudaGenOperator_" + q_function_name;
114 
115   // Find dim, P_1d, Q_1d
116   data->max_P_1d = 0;
117   for (CeedInt i = 0; i < num_input_fields; i++) {
118     CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
119     if (basis != CEED_BASIS_COLLOCATED) {
120       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
121       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
122 
123       // Collect dim, P_1d, and Q_1d
124       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
125       bool isTensor;
126       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
127       if (isTensor) {
128         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
129         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
130         if (P_1d > data->max_P_1d) data->max_P_1d = P_1d;
131       } else {
132         // LCOV_EXCL_START
133         return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
134         // LCOV_EXCL_STOP
135       }
136     }
137   }
138   // Check output bases for Q_1d, dim as well
139   //   The only input basis might be CEED_BASIS_COLLOCATED
140   for (CeedInt i = 0; i < num_output_fields; i++) {
141     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
142 
143     if (basis != CEED_BASIS_COLLOCATED) {
144       CeedCallBackend(CeedBasisGetData(basis, &basis_data));
145       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
146 
147       // Collect Q_1d
148       CeedCallBackend(CeedBasisGetDimension(basis, &dim));
149       bool isTensor;
150       CeedCallBackend(CeedBasisIsTensor(basis, &isTensor));
151       if (isTensor) {
152         CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
153       } else {
154         // LCOV_EXCL_START
155         return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
156         // LCOV_EXCL_STOP
157       }
158     }
159   }
160   data->dim  = dim;
161   data->Q_1d = Q_1d;
162 
163   // Only use 3D collocated gradient parallelization strategy when gradient is computed
164   // TODO: put in a function?
165   bool use_collograd_parallelization = false;
166   if (dim == 3) {
167     bool was_grad_found = false;
168     for (CeedInt i = 0; i < num_input_fields; i++) {
169       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
170       if (eval_mode == CEED_EVAL_GRAD) {
171         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
172         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
173         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
174         was_grad_found                = true;
175       }
176     }
177     for (CeedInt i = 0; i < num_output_fields; i++) {
178       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
179       if (eval_mode == CEED_EVAL_GRAD) {
180         CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
181         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
182         use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
183         was_grad_found                = true;
184       }
185     }
186   }
187 
188   // Define CEED_Q_VLA
189   code << "\n#undef CEED_Q_VLA\n";
190   if (dim != 3 || use_collograd_parallelization) {
191     code << "#define CEED_Q_VLA 1\n\n";
192   } else {
193     code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
194   }
195 
196   code << q_function_source;
197 
198   // Setup
199   code << "\n// -----------------------------------------------------------------------------\n";
200   code << "\nextern \"C\" __global__ void " << operator_name
201        << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar* W) {\n";
202   for (CeedInt i = 0; i < num_input_fields; i++) {
203     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
204     if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
205       code << "  const CeedScalar* d_u_" << i << " = fields.inputs[" << i << "];\n";
206     }
207   }
208 
209   for (CeedInt i = 0; i < num_output_fields; i++) {
210     code << "  CeedScalar* d_v_" << i << " = fields.outputs[" << i << "];\n";
211   }
212 
213   code << "  const CeedInt dim = " << dim << ";\n";
214   code << "  const CeedInt Q_1d = " << Q_1d << ";\n";
215 
216   code << "  extern __shared__ CeedScalar slice[];\n";
217   // TODO put in a function? InitSharedData_Cuda?
218   code << "  SharedData_Cuda data;\n";
219   code << "  data.t_id_x = threadIdx.x;\n";
220   code << "  data.t_id_y = threadIdx.y;\n";
221   code << "  data.t_id_z = threadIdx.z;\n";
222   code << "  data.t_id  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
223   code << "  data.slice = slice+data.t_id_z*T_1D" << (dim > 1 ? "*T_1D" : "") << ";\n";
224 
225   code << "\n  // -- Input field constants and basis data --\n";
226   // TODO: Put in a function?
227   // Initialize constants, and matrices B and G
228   for (CeedInt i = 0; i < num_input_fields; i++) {
229     code << "  // ---- Input field " << i << " ----\n";
230     // Get elem_size, eval_mode, num_comp
231     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
232     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
233     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
234     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
235 
236     // Set field constants
237     if (eval_mode != CEED_EVAL_WEIGHT) {
238       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
239       if (basis != CEED_BASIS_COLLOCATED) {
240         CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
241         code << "  const CeedInt P_in_" << i << " = " << P_1d << ";\n";
242       } else {
243         code << "  const CeedInt P_in_" << i << " = " << Q_1d << ";\n";
244       }
245       code << "  const CeedInt num_comp_in_" << i << " = " << num_comp << ";\n";
246     }
247 
248     // Load basis data
249     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
250     switch (eval_mode) {
251       case CEED_EVAL_NONE:
252         break;
253       case CEED_EVAL_INTERP:
254         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
255         data->B.inputs[i] = basis_data->d_interp_1d;
256         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
257         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
258         break;
259       case CEED_EVAL_GRAD:
260         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
261         data->B.inputs[i] = basis_data->d_interp_1d;
262         code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
263         code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
264         if (use_collograd_parallelization) {
265           data->G.inputs[i] = basis_data->d_collo_grad_1d;
266           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * Q_1d << "];\n";
267           code << "  loadMatrix<Q_1d,Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i << ");\n";
268         } else {
269           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
270           data->G.inputs[i]   = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
271           code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
272           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_in_" + std::to_string(i))) << ",Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i
273                << ");\n";
274         }
275         break;
276       case CEED_EVAL_WEIGHT:
277         break;  // No action
278       case CEED_EVAL_DIV:
279         break;  // TODO: Not implemented
280       case CEED_EVAL_CURL:
281         break;  // TODO: Not implemented
282     }
283   }
284 
285   code << "\n  // -- Output field constants and basis data --\n";
286   for (CeedInt i = 0; i < num_output_fields; i++) {
287     code << "  // ---- Output field " << i << " ----\n";
288     // Get elem_size, eval_mode, num_comp
289     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
290     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
291     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
292     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
293 
294     // Set field constants
295     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
296     if (basis != CEED_BASIS_COLLOCATED) {
297       CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
298       code << "  const CeedInt P_out_" << i << " = " << P_1d << ";\n";
299     } else {
300       code << "  const CeedInt P_out_" << i << " = " << Q_1d << ";\n";
301     }
302     code << "  const CeedInt num_comp_out_" << i << " = " << num_comp << ";\n";
303 
304     // Load basis data
305     code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
306     switch (eval_mode) {
307       case CEED_EVAL_NONE:
308         break;  // No action
309       case CEED_EVAL_INTERP:
310         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
311         data->B.outputs[i] = basis_data->d_interp_1d;
312         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
313         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
314         break;
315       case CEED_EVAL_GRAD:
316         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
317         data->B.outputs[i] = basis_data->d_interp_1d;
318         code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
319         code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
320         if (use_collograd_parallelization) {
321           data->G.outputs[i] = basis_data->d_collo_grad_1d;
322           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * Q_1d << "];\n";
323           code << "  loadMatrix<Q_1d,Q_1d>(data, G.outputs[" << i << "], s_G_out_" << i << ");\n";
324         } else {
325           bool has_collo_grad = !!basis_data->d_collo_grad_1d;
326           data->G.outputs[i]  = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
327           code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
328           code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_out_" + std::to_string(i))) << ",Q_1d>(data, G.outputs[" << i << "], s_G_out_"
329                << i << ");\n";
330         }
331         break;
332       // LCOV_EXCL_START
333       case CEED_EVAL_WEIGHT: {
334         Ceed ceed;
335         CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
336         return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
337         break;  // Should not occur
338       }
339       case CEED_EVAL_DIV:
340         break;  // TODO: Not implemented
341       case CEED_EVAL_CURL:
342         break;  // TODO: Not implemented
343                 // LCOV_EXCL_STOP
344     }
345   }
346   code << "\n  // -- Element loop --\n";
347   code << "  __syncthreads();\n";
348   code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
349   // Input basis apply if needed
350   // Generate the correct eval mode code for each input
351   code << "    // -- Input field restrictions and basis actions --\n";
352   for (CeedInt i = 0; i < num_input_fields; i++) {
353     code << "    // ---- Input field " << i << " ----\n";
354     // Get elem_size, eval_mode, num_comp
355     CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
356     CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
357     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
358     CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
359 
360     // TODO: put in a function?
361     // Restriction
362     if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_collograd_parallelization)) {
363       code << "    CeedScalar r_u_" << i << "[num_comp_in_" << i << "*P_in_" << i << "];\n";
364 
365       bool is_strided;
366       CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
367       if (!is_strided) {
368         CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
369         code << "    const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
370         CeedInt comp_stride;
371         CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
372         code << "    // CompStride: " << comp_stride << "\n";
373         CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
374         data->indices.inputs[i] = restr_data->d_ind;
375         code << "    readDofsOffset" << dim << "d<num_comp_in_" << i << ", " << comp_stride << ", P_in_" << i << ">(data, lsize_in_" << i
376              << ", elem, indices.inputs[" << i << "], d_u_" << i << ", r_u_" << i << ");\n";
377       } else {
378         bool backendstrides;
379         CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
380         CeedInt num_elem;
381         CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
382         CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
383         if (!backendstrides) {
384           CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
385         }
386         code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
387         code << "    readDofsStrided" << dim << "d<num_comp_in_" << i << ",P_in_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
388              << ">(data, elem, d_u_" << i << ", r_u_" << i << ");\n";
389       }
390     }
391 
392     // TODO: put in a function?
393     // Basis action
394     code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
395     switch (eval_mode) {
396       case CEED_EVAL_NONE:
397         if (!use_collograd_parallelization) {
398           code << "    CeedScalar* r_t_" << i << " = r_u_" << i << ";\n";
399         }
400         break;
401       case CEED_EVAL_INTERP:
402         code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
403         code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_"
404              << i << ", r_t_" << i << ");\n";
405         break;
406       case CEED_EVAL_GRAD:
407         if (use_collograd_parallelization) {
408           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
409           code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i
410                << ", s_B_in_" << i << ", r_t_" << i << ");\n";
411         } else {
412           CeedInt P_1d;
413           CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
414           CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
415           code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*dim*Q_1d];\n";
416           code << "    Grad" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_in_" << i
417                << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_" << i << ", s_G_in_" << i << ", r_t_" << i << ");\n";
418         }
419         break;
420       case CEED_EVAL_WEIGHT:
421         code << "    CeedScalar r_t_" << i << "[Q_1d];\n";
422         CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
423         CeedCallBackend(CeedBasisGetData(basis, &basis_data));
424         data->W = basis_data->d_q_weight_1d;
425         code << "    Weight" << (dim > 1 ? "Tensor" : "") << dim << "d<Q_1d>(data, W, r_t_" << i << ");\n";
426         break;  // No action
427       case CEED_EVAL_DIV:
428         break;  // TODO: Not implemented
429       case CEED_EVAL_CURL:
430         break;  // TODO: Not implemented
431     }
432   }
433 
434   // TODO: put in a function + separate collograd logic
435   // Q function
436   code << "\n    // -- Output field setup --\n";
437   for (CeedInt i = 0; i < num_output_fields; i++) {
438     code << "\n    // ---- Output field " << i << " ----\n";
439     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
440     if (eval_mode == CEED_EVAL_GRAD) {
441       if (use_collograd_parallelization) {
442         // Accumulator for gradient slices
443         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
444         code << "    for (CeedInt i = 0; i < num_comp_out_" << i << "; i++) {\n";
445         code << "      for (CeedInt j = 0; j < Q_1d; ++j) {\n";
446         code << "        r_tt_" << i << "[j + i*Q_1d] = 0.0;\n";
447         code << "      }\n";
448         code << "    }\n";
449       } else {
450         code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*dim*Q_1d];\n";
451       }
452     }
453     if (eval_mode == CEED_EVAL_NONE || eval_mode == CEED_EVAL_INTERP) {
454       code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
455     }
456   }
457   // We treat quadrature points per slice in 3d to save registers
458   if (use_collograd_parallelization) {
459     code << "\n    // Note: Using planes of 3D elements\n";
460     code << "#pragma unroll\n";
461     code << "    for (CeedInt q = 0; q < Q_1d; q++) {\n";
462     code << "      // -- Input fields --\n";
463     for (CeedInt i = 0; i < num_input_fields; i++) {
464       code << "      // ---- Input field " << i << " ----\n";
465       // Get elem_size, eval_mode, num_comp
466       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
467       // Basis action
468       code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
469       switch (eval_mode) {
470         case CEED_EVAL_NONE:
471           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
472 
473           bool is_strided;
474           CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &Erestrict));
475           CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
476           if (!is_strided) {
477             CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
478             code << "      const CeedInt lsize_in_" << i << " = " << lsize << ";\n";
479             CeedInt comp_stride;
480             CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
481             code << "      // CompStride: " << comp_stride << "\n";
482             CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
483             data->indices.inputs[i] = restr_data->d_ind;
484             code << "      readSliceQuadsOffset"
485                  << "3d<num_comp_in_" << i << ", " << comp_stride << ", Q_1d>(data, lsize_in_" << i << ", elem, q, indices.inputs[" << i << "], d_u_"
486                  << i << ", r_q_" << i << ");\n";
487           } else {
488             CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
489             bool backendstrides;
490             CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &backendstrides));
491             CeedInt num_elem;
492             CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
493             CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
494             if (!backendstrides) {
495               CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
496             }
497             code << "      // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
498             code << "      readSliceQuadsStrided"
499                  << "3d<num_comp_in_" << i
500                  << ",Q_1d"
501                     ","
502                  << strides[0] << "," << strides[1] << "," << strides[2] << ">(data, elem, q, d_u_" << i << ", r_q_" << i << ");\n";
503           }
504           break;
505         case CEED_EVAL_INTERP:
506           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
507           code << "      for (CeedInt j = 0; j < num_comp_in_" << i << " ; ++j) {\n";
508           code << "        r_q_" << i << "[j] = r_t_" << i << "[q + j*Q_1d];\n";
509           code << "      }\n";
510           break;
511         case CEED_EVAL_GRAD:
512           code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "*dim];\n";
513           code << "      gradCollo3d<num_comp_in_" << i << ",Q_1d>(data, q, r_t_" << i << ", s_G_in_" << i << ", r_q_" << i << ");\n";
514           break;
515         case CEED_EVAL_WEIGHT:
516           code << "      CeedScalar r_q_" << i << "[1];\n";
517           code << "      r_q_" << i << "[0] = r_t_" << i << "[q];\n";
518           break;  // No action
519         case CEED_EVAL_DIV:
520           break;  // TODO: Not implemented
521         case CEED_EVAL_CURL:
522           break;  // TODO: Not implemented
523       }
524     }
525     code << "\n      // -- Output fields --\n";
526     for (CeedInt i = 0; i < num_output_fields; i++) {
527       code << "      // ---- Output field " << i << " ----\n";
528       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
529       // Basis action
530       switch (eval_mode) {
531         case CEED_EVAL_NONE:
532           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
533           break;  // No action
534         case CEED_EVAL_INTERP:
535           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
536           break;
537         case CEED_EVAL_GRAD:
538           code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "*dim];\n";
539           break;
540         case CEED_EVAL_WEIGHT:
541           break;  // Should not occur
542         case CEED_EVAL_DIV:
543           break;  // TODO: Not implemented
544         case CEED_EVAL_CURL:
545           break;  // TODO: Not implemented
546       }
547     }
548   } else {
549     code << "\n      // Note: Using full elements\n";
550     code << "      // -- Input fields --\n";
551     for (CeedInt i = 0; i < num_input_fields; i++) {
552       code << "      // ---- Input field " << i << " ----\n";
553       code << "      CeedScalar* r_q_" << i << " = r_t_" << i << ";\n";
554     }
555     code << "      // -- Output fields --\n";
556     for (CeedInt i = 0; i < num_output_fields; i++) {
557       code << "      // ---- Output field " << i << " ----\n";
558       code << "      CeedScalar* r_qq_" << i << " = r_tt_" << i << ";\n";
559     }
560   }
561   code << "\n      // -- QFunction Inputs and outputs --\n";
562   code << "      CeedScalar* in[" << num_input_fields << "];\n";
563   for (CeedInt i = 0; i < num_input_fields; i++) {
564     code << "      // ---- Input field " << i << " ----\n";
565     code << "      in[" << i << "] = r_q_" << i << ";\n";
566   }
567   code << "      CeedScalar* out[" << num_output_fields << "];\n";
568   for (CeedInt i = 0; i < num_output_fields; i++) {
569     code << "      // ---- Output field " << i << " ----\n";
570     code << "      out[" << i << "] = r_qq_" << i << ";\n";
571   }
572   code << "\n      // -- Apply QFunction --\n";
573   code << "      " << q_function_name << "(ctx, ";
574   if (dim != 3 || use_collograd_parallelization) {
575     code << "1";
576   } else {
577     code << "Q_1d";
578   }
579   code << ", in, out);\n";
  // With collocated-gradient parallelization, the generated kernel at this
  // point is inside a loop over quadrature points (generated loop variable
  // `q`); emit the per-point output handling that folds each field's
  // per-point values (r_qq_*) back into per-element quadrature data (r_tt_*).
  if (use_collograd_parallelization) {
    code << "      // -- Output fields --\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      code << "      // ---- Output field " << i << " ----\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // No basis action; gather this point's component values into slice q
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;  // No action
        case CEED_EVAL_INTERP:
          // Interp transpose is applied after the quadrature-point loop (see
          // the basis-action section below); here only gather into r_tt_*
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          // Collocated gradient transpose applied pointwise at quadrature point q
          code << "      gradColloTranspose3d<num_comp_out_" << i << ",Q_1d>(data, q, r_qq_" << i << ", s_G_out_" << i << ", r_tt_" << i << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
          break;  // TODO: Not implemented
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
      }
    }
    // Close the generated quadrature-point loop (opened in the collograd
    // section earlier in this function, outside this view)
    code << "    }\n";
  }

  // Output basis apply if needed
  // Generate the correct eval mode code for each output
  code << "\n    // -- Output field basis action and restrictions --\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "    // ---- Output field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &Erestrict));
    CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(Erestrict, &num_comp));
    // TODO put in a function
    // Basis action: emit the transpose basis kernel call producing the
    // E-vector values r_v_* from quadrature data r_tt_*
    code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        // No basis action; alias r_v_* to the quadrature-data registers
        code << "    CeedScalar* r_v_" << i << " = r_tt_" << i << ";\n";
        break;  // No action
      case CEED_EVAL_INTERP:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
             << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        if (use_collograd_parallelization) {
          // Gradient transpose was already applied pointwise in the
          // quadrature-point loop above; only interp transpose remains
          code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
               << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        } else {
          CeedInt P_1d;
          CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
          CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
          // 3D with Q_1d >= P_1d selects the collocated-gradient variant
          code << "    GradTranspose" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_out_" << i
               << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i << ", s_B_out_" << i << ", s_G_out_" << i << ", r_v_" << i << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT: {
        // Weights are input-only; reject at build time
        Ceed ceed;
        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      }
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
                // LCOV_EXCL_STOP
    }
    // TODO put in a function
    // Restriction: emit the transpose element restriction scattering r_v_*
    // into the output L-vector d_v_*
    bool is_strided;
    CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &is_strided));
    if (!is_strided) {
      // Indexed restriction: stash the device index array for the launch and
      // emit an offset-based write
      CeedCallBackend(CeedElemRestrictionGetLVectorSize(Erestrict, &lsize));
      code << "    const CeedInt lsize_out_" << i << " = " << lsize << ";\n";
      CeedInt comp_stride;
      CeedCallBackend(CeedElemRestrictionGetCompStride(Erestrict, &comp_stride));
      code << "    // CompStride: " << comp_stride << "\n";
      CeedCallBackend(CeedElemRestrictionGetData(Erestrict, &restr_data));
      data->indices.outputs[i] = restr_data->d_ind;
      code << "    writeDofsOffset" << dim << "d<num_comp_out_" << i << ", " << comp_stride << ", P_out_" << i << ">(data, lsize_out_" << i
           << ", elem, indices.outputs[" << i << "], r_v_" << i << ", d_v_" << i << ");\n";
    } else {
      // Strided restriction: strides are baked into the generated template
      // arguments; default backend strides unless the user supplied their own
      bool has_backend_strides;
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &has_backend_strides));
      CeedInt num_elem;
      CeedCallBackend(CeedElemRestrictionGetNumElements(Erestrict, &num_elem));
      CeedInt strides[3] = {1, elem_size * num_elem, elem_size};
      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(Erestrict, &strides));
      }
      code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
      code << "    writeDofsStrided" << dim << "d<num_comp_out_" << i << ",P_out_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
           << ">(data, elem, r_v_" << i << ", d_v_" << i << ");\n";
    }
  }

  // Close the generated element loop and kernel body
  code << "  }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  // View kernel for debugging
  CeedDebug256(ceed, 2, "Generated Operator Kernels:\n");
  CeedDebug(ceed, code.str().c_str());

  // JIT compile the assembled source; T_1D = max(Q_1d, max P_1d) — presumably
  // so 1D tile sizes in the generated kernel cover both basis dimensions
  CeedCallBackend(CeedCompileCuda(ceed, code.str().c_str(), &data->module, 1, "T_1D", CeedIntMax(Q_1d, data->max_P_1d)));
  CeedCallBackend(CeedGetKernelCuda(ceed, data->module, operator_name.c_str(), &data->op));

  // Mark the operator so subsequent applies skip rebuilding
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  return CEED_ERROR_SUCCESS;
}
703 //------------------------------------------------------------------------------
704