xref: /libCEED/backends/hip-gen/ceed-hip-gen-operator-build.cpp (revision 6de40545466b5fa4cbcd17feda0352f2a4b1d743)
1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #define CEED_DEBUG_COLOR 12
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-tools.h>
13 
14 #include <iostream>
15 #include <sstream>
16 #include <string>
17 
18 #include "../hip-ref/ceed-hip-ref.h"
19 #include "../hip-shared/ceed-hip-shared.h"
20 #include "../hip/ceed-hip-common.h"
21 #include "../hip/ceed-hip-compile.h"
22 #include "ceed-hip-gen.h"
23 
// Records that a field's shared-memory basis matrix (s_B, and possibly s_G)
// can alias one already loaded for an earlier field, avoiding a duplicate
// shared-memory allocation and LoadMatrix call.
struct FieldReuse_Hip {
  CeedInt      index;      // index of the field whose matrix is reused; -1 means no reuse
  bool         is_input;   // true if the reused field is an input field (selects "_in_" vs "_out_" suffix)
  CeedEvalMode eval_mode;  // eval mode of the reused field; s_G is only shared when this is CEED_EVAL_GRAD
};
29 
30 //------------------------------------------------------------------------------
31 // Calculate the block size used for launching the operator kernel
32 //------------------------------------------------------------------------------
33 extern "C" int BlockGridCalculate_Hip_gen(const CeedInt dim, const CeedInt num_elem, const CeedInt P_1d, const CeedInt Q_1d, CeedInt *block_sizes) {
34   const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
35   if (dim == 1) {
36     CeedInt elems_per_block = 64 * thread_1d > 256 ? 256 / thread_1d : 64;
37 
38     elems_per_block = elems_per_block > 0 ? elems_per_block : 1;
39     block_sizes[0]  = thread_1d;
40     block_sizes[1]  = 1;
41     block_sizes[2]  = elems_per_block;
42   } else if (dim == 2) {
43     const CeedInt elems_per_block = thread_1d < 4 ? 16 : 2;
44 
45     block_sizes[0] = thread_1d;
46     block_sizes[1] = thread_1d;
47     block_sizes[2] = elems_per_block;
48   } else if (dim == 3) {
49     const CeedInt elems_per_block = thread_1d < 6 ? 4 : (thread_1d < 8 ? 2 : 1);
50 
51     block_sizes[0] = thread_1d;
52     block_sizes[1] = thread_1d;
53     block_sizes[2] = elems_per_block;
54   }
55   return CEED_ERROR_SUCCESS;
56 }
57 
58 //------------------------------------------------------------------------------
59 // Determine type of operator
60 //------------------------------------------------------------------------------
// Gather the shape information needed to build the operator kernel:
//   - is_all_tensor:  true iff every field with a basis uses a tensor-product basis
//   - max_P/max_P_1d: largest node count (total / 1D) over all bases
//   - Q/Q_1d:         quadrature point counts, required to agree across all fields
//   - max_dim:        largest basis dimension seen
//   - use_3d_slices:  true iff all bases are 3D tensor and every CEED_EVAL_GRAD
//                     basis provides a collocated 1D gradient matrix
// NOTE(review): *max_dim is only max-reduced here, never initialized — assumes
// the caller zero-initializes it before the call; confirm at the call site.
static int CeedOperatorBuildKernelData_Hip_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                               CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields,
                                               CeedQFunctionField *qf_output_fields, CeedInt *max_P, CeedInt *max_P_1d, CeedInt *Q, CeedInt *Q_1d,
                                               CeedInt *max_dim, bool *is_all_tensor, bool *use_3d_slices) {
  // Check if all are tensor
  *is_all_tensor = true;
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;

      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_all_tensor = *is_all_tensor && is_field_tensor;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  // Same check over the output fields
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;

      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_all_tensor = *is_all_tensor && is_field_tensor;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Find max_P, max_P_1d, Q, and Q_1d
  bool is_all_3d = true;

  *max_P    = 0;
  *max_P_1d = 0;
  *Q        = 0;
  *Q_1d     = 0;
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;

      // Check if 3D
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      is_all_3d = is_all_3d && (field_dim == 3);
      *max_dim  = CeedIntMax(*max_dim, field_dim);

      // Collect P, P_1d, Q, and Q_1d
      CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
      *max_P = CeedIntMax(*max_P, field_P);
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
        *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      }
      // All fields must share one quadrature space (Q and, when tensor, Q_1d)
      CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
      CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q = field_Q;
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
        CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
        *Q_1d = field_Q_1d;
      }
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  // Same collection over the output fields
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;

      // Check if 3D
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      is_all_3d = is_all_3d && (field_dim == 3);
      *max_dim  = CeedIntMax(*max_dim, field_dim);

      // Collect P, P_1d, Q, and Q_1d
      CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
      *max_P = CeedIntMax(*max_P, field_P);
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
        *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      }
      CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
      CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q = field_Q;
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
        CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
        *Q_1d = field_Q_1d;
      }
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Only use 3D collocated gradient parallelization strategy when gradient is computed
  *use_3d_slices = false;
  if (is_all_3d && *is_all_tensor) {
    bool was_grad_found = false;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Hip_shared *basis_data;
        CeedBasis             basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        // AND across all gradient fields: every one must have d_collo_grad_1d
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Hip_shared *basis_data;
        CeedBasis             basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}
202 
203 //------------------------------------------------------------------------------
204 // Setup fields
205 //------------------------------------------------------------------------------
// Emit per-field setup code into `code` for input/output field `i`:
//   - field constants (dim_*, P_* / P_1d_*, num_comp_*)
//   - shared-memory basis matrices s_B_* / s_G_*, either freshly loaded with
//     LoadMatrix or aliased to a previous field's matrix per `field_reuse`
// Also records the device pointers for the B/G matrices (and, for AtPoints,
// lazily uploads the 1D Chebyshev interpolation matrix) into `data`.
// NOTE(review): basis_data is only assigned when basis != CEED_BASIS_NONE;
// the INTERP/GRAD cases dereference it, so presumably a field without a basis
// always has eval_mode NONE or WEIGHT — confirm against the QFunction setup.
static int CeedOperatorBuildKernelFieldData_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedOperatorField op_field,
                                                    CeedQFunctionField qf_field, FieldReuse_Hip field_reuse, CeedInt max_dim, CeedInt Q, CeedInt Q_1d,
                                                    bool is_input, bool is_all_tensor, bool is_at_points, bool use_3d_slices) {
  bool      is_tensor = true;
  CeedBasis basis;
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  if (basis != CEED_BASIS_NONE) CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));

  const char           *field_name;
  std::string           var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string           P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
  std::string           option_name = (is_input ? "inputs" : "outputs");
  CeedEvalMode          eval_mode   = CEED_EVAL_NONE;
  CeedInt               elem_size = 0, num_comp = 0, dim = max_dim, P_1d = 0;
  CeedElemRestriction   elem_rstr;
  CeedBasis_Hip_shared *basis_data;

  // Field reuse info (index == -1 means this field loads its own matrices)
  bool use_previous_field = field_reuse.index != -1;

  CeedCallBackend(CeedOperatorFieldGetName(op_field, &field_name));
  code << "  // -- " << (is_input ? "Input" : "Output") << " field " << i << ": " << field_name << "\n";

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  if (basis != CEED_BASIS_NONE) {
    CeedCallBackend(CeedBasisGetData(basis, &basis_data));
    CeedCallBackend(CeedBasisGetDimension(basis, &dim));
    // For non-tensor bases P_1d holds the total node count
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Set field constants
  code << "  const CeedInt dim" << var_suffix << " = " << dim << ";\n";
  // In a mixed tensor/non-tensor operator, tensor fields also need total P
  if (is_tensor && !is_all_tensor) {
    CeedInt P = 0;

    CeedCallBackend(CeedBasisGetNumNodes(basis, &P));
    code << "  const CeedInt P" << var_suffix << " = " << (basis == CEED_BASIS_NONE ? Q : P) << ";\n";
  }
  code << "  const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n";
  if (eval_mode != CEED_EVAL_WEIGHT) {
    code << "  const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n";
  }

  // Load basis data
  code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
  switch (eval_mode) {
    case CEED_EVAL_NONE:
      break;
    case CEED_EVAL_INTERP:
      if (is_at_points) {
        // AtPoints: lazily build and upload the 1D Chebyshev interpolation matrix
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallHip(CeedBasisReturnCeed(basis),
                      hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      if (use_previous_field) {
        // Alias the shared-memory matrix loaded for the earlier field
        std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

        code << "  CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
      } else {
        code << "  __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
        code << "  LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
      }
      break;
    case CEED_EVAL_GRAD:
      if (is_at_points) {
        // AtPoints: same lazy Chebyshev upload as the INTERP case
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallHip(CeedBasisReturnCeed(basis),
                      hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      // Tensor gradient also needs the interpolation matrix s_B
      if (is_tensor) {
        if (use_previous_field) {
          std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << "  CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          code << "  __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
          code << "  LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
        }
      }
      if (is_at_points) break;  // No G mat for AtPoints
      if (use_3d_slices) {
        // Collocated gradient: G is Q_1d x Q_1d
        if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d;
        else data->G.outputs[i] = basis_data->d_collo_grad_1d;
        // s_G can only be reused from a field that also computed a gradient
        if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
          std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << "  CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          code << "  __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
          code << "  LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
        }
      } else {
        // Prefer the collocated gradient matrix when the basis provides one
        bool has_collo_grad = basis_data->d_collo_grad_1d;

        if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        if (has_collo_grad) {
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << "  CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            code << "  __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
            code << "  LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
          }
        } else {
          // True gradient matrix: P x Q, with an extra dim factor for non-tensor bases
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << "  CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            code << "  __shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim")
                 << (is_tensor ? "" : var_suffix) << "];\n";
            code << "  LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << (is_tensor ? "" : var_suffix) << ">(data, G."
                 << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
          }
        }
      }
      break;
    case CEED_EVAL_WEIGHT:
      break;  // No action
      // LCOV_EXCL_START
    case CEED_EVAL_DIV:
    case CEED_EVAL_CURL:
      break;  // TODO: Not implemented
              // LCOV_EXCL_STOP
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  return CEED_ERROR_SUCCESS;
}
376 
377 //------------------------------------------------------------------------------
378 // Restriction
379 //------------------------------------------------------------------------------
// Emit element-restriction code into `code` for field `i`: inputs read the
// L-vector into the per-element buffer r_e_* (ReadLVecStandard/Strided), and
// outputs write r_e_* back (WriteLVecStandard/Strided). Device offset arrays
// for standard/points restrictions are recorded in data->indices.
// `field_input_buffer` maps each input to the input whose r_e buffer it
// shares; field_input_buffer[i] != i means the restriction was already done.
// NOTE(review): rstr_data is only assigned when elem_rstr != NONE, yet the
// STANDARD/POINTS cases dereference it — presumably every field reaching
// those cases has a real restriction; confirm with the operator setup.
static int CeedOperatorBuildKernelRestriction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt field_input_buffer[],
                                                      CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt max_dim, CeedInt Q_1d,
                                                      bool is_input, bool is_all_tensor, bool is_at_points, bool use_3d_slices) {
  std::string              var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string              P_name     = (is_all_tensor ? "P_1d" : "P") + var_suffix;
  CeedEvalMode             eval_mode  = CEED_EVAL_NONE;
  CeedInt                  elem_size = 0, num_comp = 0;
  CeedSize                 l_size;
  CeedRestrictionType      rstr_type = CEED_RESTRICTION_STANDARD;
  CeedElemRestriction_Hip *rstr_data;
  CeedElemRestriction      elem_rstr;

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Restriction
  if (is_input) {
    // Input
    if (field_input_buffer[i] != i) {
      std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]);

      // Restriction was already done for previous input
      code << "    CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n";
    } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) {
      if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) {
        // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated
        code << "    CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n";
      } else if (rstr_type != CEED_RESTRICTION_POINTS) {
        // Otherwise we're using the scratch space
        code << "    CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
      }
      switch (rstr_type) {
        case CEED_RESTRICTION_STANDARD: {
          CeedInt comp_stride;

          CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
          code << "    const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << "    // CompStride: " << comp_stride << "\n";
          // Offsets live on the device; pass them to the kernel via data->indices
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          code << "    ReadLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name
               << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix << ");\n";
          break;
        }
        case CEED_RESTRICTION_STRIDED: {
          bool    has_backend_strides;
          CeedInt num_elem;

          CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
          CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
          // Default backend layout; replaced by user strides when provided
          CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

          if (!has_backend_strides) {
            CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
          }
          code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
          code << "    ReadLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", "
               << strides[1] << ", " << strides[2] << ">(data, elem, d" << var_suffix << ", r_e" << var_suffix << ");\n";
          break;
        }
        case CEED_RESTRICTION_POINTS: {
          CeedInt comp_stride;

          // AtPoints: only emit the component stride and record the offsets
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << "    const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          break;
        }
        // LCOV_EXCL_START
        case CEED_RESTRICTION_ORIENTED:
        case CEED_RESTRICTION_CURL_ORIENTED:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  } else {
    // Output
    switch (rstr_type) {
      case CEED_RESTRICTION_STANDARD: {
        CeedInt comp_stride;

        CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
        code << "    const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
        CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
        code << "    // CompStride: " << comp_stride << "\n";
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        code << "    WriteLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name
             << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix << ");\n";
        break;
      }
      case CEED_RESTRICTION_STRIDED: {
        bool    has_backend_strides;
        CeedInt num_elem;

        CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
        CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
        CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

        if (!has_backend_strides) {
          CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
        }
        code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
        code << "    WriteLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", "
             << strides[1] << ", " << strides[2] << ">(data, elem, r_e" << var_suffix << ", d" << var_suffix << ");\n";
        break;
      }
      case CEED_RESTRICTION_POINTS:
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        break;
      // LCOV_EXCL_START
      case CEED_RESTRICTION_ORIENTED:
      case CEED_RESTRICTION_CURL_ORIENTED:
        break;  // TODO: Not implemented
                // LCOV_EXCL_STOP
    }
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  return CEED_ERROR_SUCCESS;
}
506 
507 //------------------------------------------------------------------------------
508 // Basis
509 //------------------------------------------------------------------------------
510 static int CeedOperatorBuildKernelBasis_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedOperatorField op_field,
511                                                 CeedQFunctionField qf_field, CeedInt max_dim, CeedInt Q_1d, bool is_input, bool is_all_tensor,
512                                                 bool is_at_points, bool use_3d_slices) {
513   bool      is_tensor = true;
514   CeedBasis basis;
515   CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
516   CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
517 
518   std::string         var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
519   std::string         P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
520   CeedEvalMode        eval_mode = CEED_EVAL_NONE;
521   CeedInt             dim = max_dim, elem_size = 0, num_comp = 0, P_1d = 0;
522   CeedElemRestriction elem_rstr;
523 
524   // Get field data
525   CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
526   if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
527     CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
528     CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
529   }
530   CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
531   if (basis != CEED_BASIS_NONE) {
532     CeedCallBackend(CeedBasisGetDimension(basis, &dim));
533     if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
534     else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
535   }
536   CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));
537 
538   // Basis
539   code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
540   if (is_input) {
541     switch (eval_mode) {
542       case CEED_EVAL_NONE:
543         if (!use_3d_slices && !is_at_points) {
544           code << "    CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n";
545         }
546         break;
547       case CEED_EVAL_INTERP:
548         if (is_at_points) {
549           std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";
550 
551           code << "    CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
552           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
553                << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
554         } else {
555           std::string function_name = is_tensor
556                                           ? ((dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
557                                           : "InterpNonTensor";
558           std::string op_t_1d_name  = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
559 
560           code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
561           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
562                << var_suffix << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
563         }
564         break;
565       case CEED_EVAL_GRAD:
566         if (is_at_points) {
567           std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";
568 
569           code << "    CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
570           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
571                << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
572         } else if (use_3d_slices) {
573           std::string function_name = (dim > 1 ? "InterpTensor" : "Interp") + std::to_string(dim) + "d";
574 
575           code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
576           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
577                << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
578         } else if (is_tensor) {
579           bool        is_collocated = dim == 3 && Q_1d >= P_1d;
580           std::string function_name = (dim == 1 ? "Grad" : (is_collocated ? "GradTensorCollocated" : "GradTensor")) + std::to_string(dim) + "d" +
581                                       (is_all_tensor ? "" : "Flattened");
582           std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
583 
584           code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*"
585                << (is_all_tensor && dim >= 3 ? Q_name : "1") << "];\n";
586           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
587                << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
588         } else {
589           std::string function_name = "GradNonTensor";
590 
591           code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
592           code << "    " << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
593                << ", OP_T_1D>(data, r_e" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
594         }
595         break;
596       case CEED_EVAL_WEIGHT: {
597         if (is_at_points) {
598           code << "    // Nothing to do AtPoints\n";
599         } else {
600           CeedBasis_Hip_shared *basis_data;
601           std::string           function_name = is_tensor
602                                                     ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
603                                                     : "WeightNonTensor";
604 
605           code << "    CeedScalar r_q" << var_suffix << "[" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
606           CeedCallBackend(CeedBasisGetData(basis, &basis_data));
607           data->W = basis_data->d_q_weight_1d;
608           code << "    " << function_name << "<" << P_name << ", " << Q_name << ">(data, W, r_q" << var_suffix << ");\n";
609         }
610         break;
611       }
612       // LCOV_EXCL_START
613       case CEED_EVAL_DIV:
614       case CEED_EVAL_CURL:
615         break;  // TODO: Not implemented
616                 // LCOV_EXCL_STOP
617     }
618   } else {
619     switch (eval_mode) {
620       case CEED_EVAL_NONE:
621         code << "    CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n";
622         break;  // No action
623       case CEED_EVAL_INTERP:
624         code << "    CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
625         if (is_at_points) {
626           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";
627 
628           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
629                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
630         } else {
631           std::string function_name =
632               is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
633                         : "InterpTransposeNonTensor";
634           std::string op_t_1d_name = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
635 
636           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
637                << var_suffix << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
638         }
639         break;
640       case CEED_EVAL_GRAD:
641         code << "    CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
642         if (is_at_points) {
643           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";
644 
645           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
646                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
647         } else if (use_3d_slices) {
648           std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";
649 
650           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
651                << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
652         } else if (is_tensor) {
653           bool        is_collocated = dim == 3 && Q_1d >= P_1d;
654           std::string function_name = (dim == 1 ? "GradTranspose" : (is_collocated ? "GradTransposeTensorCollocated" : "GradTransposeTensor")) +
655                                       std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened");
656           std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);
657 
658           code << "    " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
659                << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
660         } else {
661           std::string function_name = "GradTransposeNonTensor";
662 
663           code << "    " << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
664                << ", OP_T_1D>(data, r_q" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
665         }
666         break;
667       // LCOV_EXCL_START
668       case CEED_EVAL_WEIGHT:
669         break;  // Should not occur
670       case CEED_EVAL_DIV:
671       case CEED_EVAL_CURL:
672         break;  // TODO: Not implemented
673                 // LCOV_EXCL_STOP
674     }
675   }
676   CeedCallBackend(CeedBasisDestroy(&basis));
677   return CEED_ERROR_SUCCESS;
678 }
679 
680 //------------------------------------------------------------------------------
681 // QFunction
682 //------------------------------------------------------------------------------
// Emit the QFunction-application stage of the generated operator kernel into `code`.
//
// This stage (1) declares per-output register arrays (r_q_*/r_c_*), (2) marshals basis
// results into per-quadrature-point scratch buffers (r_s_*) using one of three strategies
// (batches of points for AtPoints operators, 3D planes when use_3d_slices, or whole
// elements otherwise), (3) builds the inputs[]/outputs[] pointer arrays, (4) emits the
// call to the user QFunction, and (5) for the AtPoints and 3D-slice paths, maps the
// QFunction outputs back into the accumulator arrays.
//
// Side effects beyond appending to `code`: for non-strided inputs on the 3D-slice path,
// the restriction's device offset array is captured into data->indices.inputs[i] so the
// kernel launch can pass it as an argument.
//
// Returns CEED_ERROR_SUCCESS, or propagates backend errors via CeedCallBackend.
static int CeedOperatorBuildKernelQFunction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt max_dim, CeedInt max_num_points,
                                                    CeedInt num_input_fields, CeedOperatorField *op_input_fields, CeedQFunctionField *qf_input_fields,
                                                    CeedInt num_output_fields, CeedOperatorField *op_output_fields,
                                                    CeedQFunctionField *qf_output_fields, std::string qfunction_name, CeedInt Q_1d,
                                                    bool is_all_tensor, bool is_at_points, bool use_3d_slices) {
  // All-tensor kernels are parameterized by the 1D quadrature size, otherwise by total Q
  std::string         Q_name    = is_all_tensor ? "Q_1d" : "Q";
  CeedEvalMode        eval_mode = CEED_EVAL_NONE;
  CeedElemRestriction elem_rstr;

  // Setup output arrays
  // Each output field gets a register array sized by its eval mode:
  //   r_q_* holds quadrature-point data; r_c_* (AtPoints only) accumulates coefficients
  code << "\n    // -- Output field setup\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;
    std::string var_suffix = "_out_" + std::to_string(i);

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << "    // ---- Output field " << i << ": " << field_name << "\n";
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        if (is_at_points) {
          code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n";
        } else {
          // Only all-tensor 3D kernels keep a full column of quadrature values per thread
          code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1")
               << "];\n";
        }
        break;
      case CEED_EVAL_INTERP:
        if (is_at_points) {
          // Accumulator for point data
          code << "    CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n";
          code << "    for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "; i++) {\n";
          code << "      r_c" << var_suffix << "[i] = 0.0;\n";
          code << "    }\n";
        } else {
          code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1")
               << "];\n";
        }
        break;
      case CEED_EVAL_GRAD:
        if (is_at_points) {
          // Accumulator for point data
          code << "    CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n";
          code << "    for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "; i++) {\n";
          code << "      r_c" << var_suffix << "[i] = 0.0;\n";
          code << "    }\n";
        } else if (use_3d_slices) {
          // Accumulator for gradient slices
          code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
          code << "    for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) {\n";
          code << "      r_q" << var_suffix << "[i] = 0.0;\n";
          code << "    }\n";
        } else {
          // One value per component per direction (times the column depth for all-tensor 3D)
          code << "    CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*"
               << (is_all_tensor && (max_dim >= 3) ? Q_name : "1") << "];\n";
        }
        break;
      case CEED_EVAL_WEIGHT:
        break;
        // LCOV_EXCL_START
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
                // LCOV_EXCL_STOP
    }
  }

  // Marshal basis results into r_s_* buffers; the emitted scope opened here
  // ('for (...) {' or '{') is closed by the final 'code << "    }\n";' below
  if (is_at_points) {
    // We need to handle batches of points
    code << "\n    // Note: Using batches of points\n";
    code << "    const CeedInt point_loop_bound = (blockDim.x*blockDim.y) * ceil((1.0*max_num_points) / (blockDim.x*blockDim.y));\n\n";
    code << "    #pragma unroll\n";
    code << "    for (CeedInt i = threadIdx.x + threadIdx.y*blockDim.x; i < point_loop_bound; i += blockDim.x*blockDim.y) {\n";
    code << "      const CeedInt p = i % max_num_points;\n\n";

    code << "      // -- Coordinates\n";
    code << "      CeedScalar r_x[max_dim];\n";
    code << "      ReadPoint<max_dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n";

    code << "      // -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << "      // ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // Read point values straight from the L-vector via the restriction indices
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << "      ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix
               << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_INTERP:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << "      InterpAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          code << "      GradAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          // AtPoints uses unit weights
          code << "      CeedScalar r_s" << var_suffix << "[1];\n";
          code << "      r_s" << var_suffix << "[0] = 1.0;\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
    code << "\n      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << "      // ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_INTERP:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }

  } else if (use_3d_slices) {
    // We treat quadrature points per slice in 3d to save registers
    code << "\n    // Note: Using planes of 3D elements\n";
    code << "    #pragma unroll\n";
    code << "    for (CeedInt q = 0; q < " << Q_name << "; q++) {\n";
    code << "      // -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << "      // ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // E-vector slice read; strided vs standard restriction chosen at generation time
          bool is_strided;

          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";

          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
          if (is_strided) {
            bool    has_backend_strides;
            CeedInt num_elem, elem_size;

            CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
            CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
            CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
            // Default backend strides, overridden below when user strides are set
            CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

            if (!has_backend_strides) {
              CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
            }
            code << "      // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
            code << "      ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", " << strides[0] << ", " << strides[1] << ", "
                 << strides[2] << ">(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n";
          } else {
            CeedSize                 l_size = 0;
            CeedInt                  comp_stride;
            CeedElemRestriction_Hip *rstr_data;

            CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
            code << "      const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
            CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
            code << "      // CompStride: " << comp_stride << "\n";
            CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
            // Capture device offsets for the kernel launch argument list
            data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
            code << "      ReadEVecSliceStandard3d<num_comp" << var_suffix << ", " << comp_stride << ", " << Q_name << ">(data, l_size" << var_suffix
                 << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n";
          }
          CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
          break;
        case CEED_EVAL_INTERP:
          // Copy this q-slice out of the per-thread column
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << "      for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n";
          code << "        r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          code << "      GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_q" << var_suffix << ", s_G"
               << var_suffix << ", r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          code << "      CeedScalar r_s" << var_suffix << "[1];\n";
          code << "      r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
    code << "\n      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << "      // ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_INTERP:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  } else {
    // Whole-element path: r_s_* simply aliases the r_q_* register arrays
    code << "\n    // Note: Using full elements\n";
    code << "    {\n";
    code << "      // -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << "      // ---- Input field " << i << ": " << field_name << "\n";
      code << "      CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n";
    }
    code << "      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << "      // ---- Output field " << i << ": " << field_name << "\n";
      code << "      CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n";
    }
  }

  // Input and output buffers
  // Arrays are sized at least 1 so the generated code compiles with zero fields
  code << "\n      // -- QFunction inputs and outputs\n";
  code << "      // ---- Inputs\n";
  code << "      CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
    code << "      // ------ Input field " << i << ": " << field_name << "\n";
    code << "      inputs[" << i << "] = r_s_in_" << i << ";\n";
  }
  code << "      // ---- Outputs\n";
  code << "      CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << "      // ------ Output field " << i << ": " << field_name << "\n";
    code << "      outputs[" << i << "] = r_s_out_" << i << ";\n";
  }

  // Apply QFunction
  // Only the all-tensor 3D full-element path processes a whole column (Q_1d points)
  // per call; every other path calls the QFunction one point at a time
  code << "\n      // -- Apply QFunction\n";
  code << "      " << qfunction_name << "(ctx, ";
  if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
    code << "1";
  } else {
    code << Q_name;
  }
  code << ", inputs, outputs);\n";

  if (is_at_points) {
    // Map back to coefficients
    code << "\n      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << "      // ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE: {
          CeedInt             comp_stride;
          CeedElemRestriction elem_rstr;

          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
          code << "      const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          code << "      WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix
               << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]"
               << ", r_s" << var_suffix << ", d" << var_suffix << ");\n";
          break;
        }
        case CEED_EVAL_INTERP:
          // Zero padding points past the element's point count before accumulating
          code << "      if (i >= points.num_per_elem[elem]) {\n";
          code << "        for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n";
          code << "      }\n";
          code << "      InterpTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      if (i >= points.num_per_elem[elem]) {\n";
          code << "        for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n";
          code << "      }\n";
          code << "      GradTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name
               << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  } else if (use_3d_slices) {
    // Copy or apply transpose grad, if needed
    code << "\n      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << "      // ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << "      for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << "        r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_INTERP:
          code << "      for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << "        r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_s" << var_suffix << ", s_G"
               << var_suffix << ", r_q" << var_suffix << ");\n";
          break;
          // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
                  // LCOV_EXCL_STOP
      }
    }
  }
  // Close the point-batch loop / slice loop / element scope opened above
  code << "    }\n";
  return CEED_ERROR_SUCCESS;
}
1076 
1077 //------------------------------------------------------------------------------
1078 // Build single operator kernel
1079 //------------------------------------------------------------------------------
1080 extern "C" int CeedOperatorBuildKernel_Hip_gen(CeedOperator op, bool *is_good_build) {
1081   bool                   is_all_tensor = true, is_all_nontensor = true, is_at_points = false, use_3d_slices = false;
1082   Ceed                   ceed;
1083   CeedInt                Q = 0, Q_1d = 0, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0, coords_comp_stride = 0;
1084   CeedQFunctionField    *qf_input_fields, *qf_output_fields;
1085   CeedQFunction_Hip_gen *qf_data;
1086   CeedQFunction          qf;
1087   CeedOperatorField     *op_input_fields, *op_output_fields;
1088   CeedOperator_Hip_gen  *data;
1089   std::ostringstream     code;
1090 
1091   CeedCallBackend(CeedOperatorGetData(op, &data));
1092   {
1093     bool is_setup_done;
1094 
1095     CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
1096     if (is_setup_done) {
1097       *is_good_build = !data->use_fallback;
1098       return CEED_ERROR_SUCCESS;
1099     }
1100   }
1101 
1102   // Check field compatibility
1103   CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
1104   {
1105     bool has_shared_bases = true;
1106 
1107     for (CeedInt i = 0; i < num_input_fields; i++) {
1108       CeedBasis basis;
1109 
1110       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
1111       if (basis != CEED_BASIS_NONE) {
1112         bool        is_tensor = true;
1113         const char *resource;
1114         char       *resource_root;
1115         Ceed        basis_ceed;
1116 
1117         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
1118         is_all_tensor    = is_all_tensor && is_tensor;
1119         is_all_nontensor = is_all_nontensor && !is_tensor;
1120         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
1121         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
1122         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
1123         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
1124         CeedCallBackend(CeedFree(&resource_root));
1125         CeedCallBackend(CeedDestroy(&basis_ceed));
1126       }
1127       CeedCallBackend(CeedBasisDestroy(&basis));
1128     }
1129 
1130     for (CeedInt i = 0; i < num_output_fields; i++) {
1131       CeedBasis basis;
1132 
1133       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
1134       if (basis != CEED_BASIS_NONE) {
1135         bool        is_tensor = true;
1136         const char *resource;
1137         char       *resource_root;
1138         Ceed        basis_ceed;
1139 
1140         CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
1141         is_all_tensor    = is_all_tensor && is_tensor;
1142         is_all_nontensor = is_all_nontensor && !is_tensor;
1143 
1144         CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
1145         CeedCallBackend(CeedGetResource(basis_ceed, &resource));
1146         CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
1147         has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
1148         CeedCallBackend(CeedFree(&resource_root));
1149         CeedCallBackend(CeedDestroy(&basis_ceed));
1150       }
1151       CeedCallBackend(CeedBasisDestroy(&basis));
1152     }
1153     // -- Fallback to ref if not all bases are shared
1154     if (!has_shared_bases) {
1155       *is_good_build = false;
1156       return CEED_ERROR_SUCCESS;
1157     }
1158   }
1159   CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
1160   CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
1161   CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
1162   CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
1163 
1164   // Get operator data
1165   CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
1166   {
1167     CeedInt max_P = 0, max_P_1d = 0;
1168 
1169     CeedCallBackend(CeedOperatorBuildKernelData_Hip_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, op_output_fields,
1170                                                         qf_output_fields, &max_P, &max_P_1d, &Q, &Q_1d, &max_dim, &is_all_tensor, &use_3d_slices));
1171     data->max_P_1d = is_all_tensor ? max_P_1d : max_P;
1172   }
1173   if (max_dim == 0) max_dim = 1;
1174   data->dim = max_dim;
1175   if (is_at_points) {
1176     CeedElemRestriction_Hip *rstr_data;
1177     CeedElemRestriction      rstr_points = NULL;
1178 
1179     CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
1180     CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
1181     CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
1182     CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data));
1183     data->points.indices = (CeedInt *)rstr_data->d_offsets;
1184     CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
1185   }
1186   if (is_at_points) use_3d_slices = false;
1187   if (Q_1d == 0) {
1188     if (is_at_points) Q_1d = max_num_points;
1189     else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d));
1190   }
1191   if (Q == 0) Q = Q_1d;
1192   data->Q    = Q;
1193   data->Q_1d = Q_1d;
1194 
  // Check for restriction only identity operator
  {
    bool is_identity_qf;

    CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
    if (is_identity_qf) {
      CeedEvalMode eval_mode_in, eval_mode_out;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
      // An identity QFunction with NONE/NONE eval modes reduces to a bare restriction,
      // which this codegen backend does not support
      CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
                "Backend does not implement restriction only identity operators");
    }
  }
1209 
1210   // Load basis source files
1211   if (!is_all_tensor) {
1212     code << "// Tensor basis source\n";
1213     code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-templates.h>\n\n";
1214   }
1215   if (!is_all_tensor) {
1216     code << "// Non-tensor basis source\n";
1217     code << "#include <ceed/jit-source/hip/hip-shared-basis-nontensor-templates.h>\n\n";
1218   }
1219   if (is_at_points) {
1220     code << "// AtPoints basis source\n";
1221     code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-at-points-templates.h>\n\n";
1222   }
1223   if (!is_all_tensor && !is_all_nontensor) {
1224     code << "// Tensor basis source\n";
1225     code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-flattened-templates.h>\n\n";
1226   }
1227   code << "// CodeGen operator source\n";
1228   code << "#include <ceed/jit-source/hip/hip-gen-templates.h>\n\n";
1229 
  // Get QFunction name
  std::string qfunction_name(qf_data->qfunction_name);
  std::string operator_name;

  operator_name = "CeedKernelHipGenOperator_" + qfunction_name;

  // Define CEED_Q_VLA
  // In the 3D all-tensor non-slice case the QFunction is invoked on Q_1d points at a
  // time (per 1D pencil); otherwise one point per invocation
  code << "\n#undef CEED_Q_VLA\n";
  if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
    code << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  // Add user QFunction source
  {
    const char *source_path;

    CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
    CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/hip/gen backend requires QFunction source code file");

    code << "// User QFunction source\n";
    code << "#include \"" << source_path << "\"\n\n";
  }
1254 
  // Setup
  // Emit the kernel banner, signature, and per-field device array aliases
  code << "\n// -----------------------------------------------------------------------------\n";
  code << "// Operator Kernel\n";
  code << "// \n";
  code << "// d_[in,out]_i:   CeedVector device array\n";
  code << "// r_[in,out]_e_i: Element vector register\n";
  code << "// r_[in,out]_q_i: Quadrature space vector register\n";
  code << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
  code << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
  code << "// \n";
  code << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
  code << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
  code << "// -----------------------------------------------------------------------------\n";
  code << "\nextern \"C\" __launch_bounds__(BLOCK_SIZE)\n";
  code << "__global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Hip indices, Fields_Hip fields, Fields_Hip B, Fields_Hip G, CeedScalar* W, Points_Hip points) {\n";

  // Scratch buffers
  // Device pointer aliases for each active field; WEIGHT inputs have no data array
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << "  const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "  CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
  }
1284 
1285   code << " const CeedInt max_dim = " << max_dim << ";\n";
1286   if (!is_all_tensor) {
1287     code << "  const CeedInt Q = " << Q << ";\n";
1288   }
1289   if (!is_all_nontensor) {
1290     code << "  const CeedInt Q_1d = " << Q_1d << ";\n";
1291   }
1292   if (is_at_points) {
1293     code << "  const CeedInt max_num_points = " << max_num_points << ";\n";
1294     code << "  const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
1295   }
1296 
1297   // Shared data
1298   code << "  extern __shared__ CeedScalar slice[];\n";
1299   code << "  SharedData_Hip data;\n";
1300   code << "  data.t_id_x = threadIdx.x;\n";
1301   code << "  data.t_id_y = threadIdx.y;\n";
1302   code << "  data.t_id_z = threadIdx.z;\n";
1303   code << "  data.t_id   = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
1304   code << "  data.slice  = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n";
1305 
  // -- Determine input mat reuse
  // For each input field, find an earlier input sharing the same CeedBasis so the
  // generated kernel can reuse its shared-memory B/G matrices instead of loading duplicates
  FieldReuse_Hip input_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    input_matrix_reuse[i].index = -1;  // -1 == no reuse; this field loads its own matrices
  }
  for (CeedInt i = 0; i < num_input_fields; i++) {
    bool         is_tensor = true;
    CeedEvalMode eval_mode_i;
    CeedBasis    basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
    if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
    CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
    // Scan earlier inputs; stop at the first match
    for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          input_matrix_reuse[i].index     = j;
          input_matrix_reuse[i].is_input  = true;
          input_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            input_matrix_reuse[i].index     = j;
            input_matrix_reuse[i].is_input  = true;
            input_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }
1346 
1347   // -- Determine output mat reuse
1348   FieldReuse_Hip output_matrix_reuse[CEED_FIELD_MAX];
1349 
1350   for (CeedInt i = 0; i < num_output_fields; i++) {
1351     output_matrix_reuse[i].index = -1;
1352   }
1353   for (CeedInt i = 0; i < num_output_fields; i++) {
1354     bool         is_tensor = true;
1355     CeedEvalMode eval_mode_i;
1356     CeedBasis    basis_i;
1357 
1358     CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
1359     CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
1360     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
1361       CeedEvalMode eval_mode_j;
1362       CeedBasis    basis_j;
1363 
1364       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
1365       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1366       CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
1367       if (basis_i == basis_j) {
1368         if (is_tensor) {
1369           output_matrix_reuse[i].index     = j;
1370           output_matrix_reuse[i].is_input  = true;
1371           output_matrix_reuse[i].eval_mode = eval_mode_j;
1372         } else {
1373           // For non-tensor can only re-use with the same eval mode
1374           if (eval_mode_i == eval_mode_j) {
1375             output_matrix_reuse[i].index     = j;
1376             output_matrix_reuse[i].is_input  = true;
1377             output_matrix_reuse[i].eval_mode = eval_mode_j;
1378           }
1379         }
1380       }
1381       CeedCallBackend(CeedBasisDestroy(&basis_j));
1382     }
1383     for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
1384       CeedEvalMode eval_mode_j;
1385       CeedBasis    basis_j;
1386 
1387       CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
1388       if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
1389       CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
1390       CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
1391       if (basis_i == basis_j) {
1392         if (is_tensor) {
1393           output_matrix_reuse[i].index     = j;
1394           output_matrix_reuse[i].is_input  = false;
1395           output_matrix_reuse[i].eval_mode = eval_mode_j;
1396         } else {
1397           // For non-tensor can only re-use with the same eval mode
1398           if (eval_mode_i == eval_mode_j) {
1399             output_matrix_reuse[i].index     = j;
1400             output_matrix_reuse[i].is_input  = false;
1401             output_matrix_reuse[i].eval_mode = eval_mode_j;
1402           }
1403         }
1404       }
1405       CeedCallBackend(CeedBasisDestroy(&basis_j));
1406     }
1407     CeedCallBackend(CeedBasisDestroy(&basis_i));
1408   }
1409 
  // Initialize constants, and matrices B and G
  // Emits per-field setup code (sizes and shared-memory basis matrices), honoring
  // the reuse decisions computed above
  code << "\n  // Input field constants and basis data\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], max_dim, Q,
                                                             Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));
  }
  code << "\n  // Output field constants and basis data\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], max_dim,
                                                             Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices));
  }
1421 
  // Loop over all elements
  // Grid-stride-style loop over blockDim.z-sized batches of elements
  code << "\n  // Element loop\n";
  code << "  __syncthreads();\n";
  code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";

  // -- Compute minimum buffer space needed
  // For 3D all-tensor operators each thread holds a 1D pencil (Q_1d values) per
  // component in the restriction scratch buffer; otherwise one value per component
  CeedInt max_rstr_buffer_size = 1;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) {
      CeedInt             num_comp;
      CeedElemRestriction elem_rstr;

      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
      CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
      max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
      CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_NONE) {
      CeedInt             num_comp;
      CeedElemRestriction elem_rstr;

      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
      CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
      max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
      CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
    }
  }
  code << "    // Scratch restriction buffer space\n";
  code << "    CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";
1460 
  // -- Determine best input field processing order
  // Group fields that read the same (vector, restriction) pair so the generated
  // kernel processes them back-to-back and shares the restricted e-vector data
  CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    field_rstr_in_buffer[i] = -1;
    input_field_order[i]    = -1;
  }
  {
    bool    is_ordered[CEED_FIELD_MAX];
    CeedInt curr_index = 0;

    for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedVector          vec_i;
      CeedElemRestriction rstr_i;

      if (is_ordered[i]) continue;
      // Field i leads its own group and owns the restriction buffer
      field_rstr_in_buffer[i]       = i;
      is_ordered[i]                 = true;
      input_field_order[curr_index] = i;
      curr_index++;
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
      if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
      // Pull any later field with the same data source into this group
      for (CeedInt j = i + 1; j < num_input_fields; j++) {
        CeedVector          vec_j;
        CeedElemRestriction rstr_j;

        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
        CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
        if (rstr_i == rstr_j && vec_i == vec_j) {
          field_rstr_in_buffer[j]       = i;
          is_ordered[j]                 = true;
          input_field_order[curr_index] = j;
          curr_index++;
        }
        CeedCallBackend(CeedVectorDestroy(&vec_j));
        CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
      }
      CeedCallBackend(CeedVectorDestroy(&vec_i));
      CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
    }
  }
1504 
  // -- Input restriction and basis
  // Inputs are emitted in input_field_order so fields sharing an e-vector are adjacent
  code << "\n    // -- Input field restrictions and basis actions\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char   *field_name;
    const CeedInt f = input_field_order[i];

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
    code << "    // ---- Input field " << f << ": " << field_name << "\n";

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], max_dim,
                                                               Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true, is_all_tensor,
                                                         is_at_points, use_3d_slices));
  }

  // -- Q function
  CeedCallBackend(CeedOperatorBuildKernelQFunction_Hip_gen(code, data, max_dim, max_num_points, num_input_fields, op_input_fields, qf_input_fields,
                                                           num_output_fields, op_output_fields, qf_output_fields, qfunction_name, Q_1d, is_all_tensor,
                                                           is_at_points, use_3d_slices));

  // -- Output basis and restriction
  // Outputs use declaration order; no buffer sharing (NULL) for output restrictions
  code << "\n    // -- Output field basis action and restrictions\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << "    // ---- Output field " << i << ": " << field_name << "\n";

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, i, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false, is_all_tensor,
                                                         is_at_points, use_3d_slices));

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, i, NULL, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false,
                                                               is_all_tensor, is_at_points, use_3d_slices));
  }

  // Close loop and function
  code << "  }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";
1549 
  CeedInt block_sizes[3] = {0, 0, 0};
  CeedInt num_elem;

  // Compile
  // Launch geometry comes from BlockGridCalculate_Hip_gen; mixed/non-tensor operators
  // are launched as 1D with Q threads
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(BlockGridCalculate_Hip_gen(is_all_tensor ? max_dim : 1, num_elem, data->max_P_1d, is_all_tensor ? Q_1d : Q, block_sizes));
  // AtPoints kernels process one element per thread block
  if (is_at_points) block_sizes[2] = 1;
  {
    bool is_compile_good = false;

    data->thread_1d = block_sizes[0];
    CeedCallBackend(CeedTryCompile_Hip(ceed, code.str().c_str(), &is_compile_good, &data->module, 2, "OP_T_1D", block_sizes[0], "BLOCK_SIZE",
                                       block_sizes[0] * block_sizes[1] * block_sizes[2]));
    if (is_compile_good) {
      *is_good_build = true;
      CeedCallBackend(CeedGetKernel_Hip(ceed, data->module, operator_name.c_str(), &data->op));
    } else {
      // JIT compile failed; caller will fall back to a reference implementation
      *is_good_build     = false;
      data->use_fallback = true;
    }
  }
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  CeedCallBackend(CeedDestroy(&ceed));
  CeedCallBackend(CeedQFunctionDestroy(&qf));
  return CEED_ERROR_SUCCESS;
1575 }
1576 
1577 //------------------------------------------------------------------------------
1578