// Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#define CEED_DEBUG_COLOR 12

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/gen-tools.h>
#include <ceed/jit-tools.h>
#include <cuda_runtime.h>

#include <iostream>
#include <sstream>
#include <string>

#include "../cuda-ref/ceed-cuda-ref.h"
#include "../cuda-shared/ceed-cuda-shared.h"
#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-gen.h"

// Description of a previously processed field whose shared-memory basis
// matrices (s_B_*/s_G_*) can be reused by a later field with the same basis
struct FieldReuse_Cuda {
  CeedInt      index;      // index of the field being reused; -1 means no reuse
  bool         is_input;   // whether the reused field is an input field
  CeedEvalMode eval_mode;  // eval mode of the reused field
};

//------------------------------------------------------------------------------
// Determine type of operator
//
// Scans every input/output field and reports:
//   - is_all_tensor: true iff every field with a basis uses a tensor basis
//   - max_P, max_P_1d: maximum (1D) node counts over all bases
//   - Q, Q_1d: the shared (1D) quadrature sizes (all bases must agree)
//   - max_dim: maximum basis dimension seen (combined via CeedIntMax)
//   - use_3d_slices: whether the 3D collocated-gradient ("slices") strategy
//     applies (all-3D tensor bases, and every GRAD basis has d_collo_grad_1d)
//
// NOTE(review): *max_P, *max_P_1d, *Q, *Q_1d are zeroed here, but *max_dim is
// only max-combined — presumably the caller initializes it to 0; confirm at
// the call site.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelData_Cuda_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                                CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields,
                                                CeedQFunctionField *qf_output_fields, CeedInt *max_P, CeedInt *max_P_1d, CeedInt *Q, CeedInt *Q_1d,
                                                CeedInt *max_dim, bool *is_all_tensor, bool *use_3d_slices) {
  // Check if all are tensor
  *is_all_tensor = true;
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;

      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_all_tensor = *is_all_tensor && is_field_tensor;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;

      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_all_tensor = *is_all_tensor && is_field_tensor;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Find max_P, max_P_1d, Q, and Q_1d
  bool is_all_3d = true;

  *max_P    = 0;
  *max_P_1d = 0;
  *Q        = 0;
  *Q_1d     = 0;
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;

      // Check if 3D
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      is_all_3d = is_all_3d && (field_dim == 3);
      *max_dim  = CeedIntMax(*max_dim, field_dim);

      // Collect P, P_1d, Q, and Q_1d
      CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
      *max_P = CeedIntMax(*max_P, field_P);
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
        *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      }
      // All fields must share a single quadrature space (Q and, for tensor bases, Q_1d)
      CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
      CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q = field_Q;
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
        CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
        *Q_1d = field_Q_1d;
      }
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_dim = 0, field_P = 0, field_P_1d = 0, field_Q = 0, field_Q_1d = 0;

      // Check if 3D
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      is_all_3d = is_all_3d && (field_dim == 3);
      *max_dim  = CeedIntMax(*max_dim, field_dim);

      // Collect P, P_1d, Q, and Q_1d
      CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P));
      *max_P = CeedIntMax(*max_P, field_P);
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
        *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      }
      CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q));
      CeedCheck(*Q == 0 || field_Q == *Q, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q = field_Q;
      if (is_field_tensor) {
        CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
        CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
        *Q_1d = field_Q_1d;
      }
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Only use 3D collocated gradient parallelization strategy when gradient is computed
  *use_3d_slices = false;
  if (is_all_3d && *is_all_tensor) {
    bool was_grad_found = false;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Cuda_shared *basis_data;
        CeedBasis              basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        // Every GRAD basis must provide collocated gradient data; the first
        // GRAD field seeds the flag, later ones AND into it
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Cuda_shared *basis_data;
        CeedBasis              basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup fields
//
// Emits the per-field constants (dim, P/P_1d, num_comp) and the shared-memory
// basis matrices (s_B_*, s_G_*) for one operator field into the generated
// kernel source, and stores the matching device pointers in data->B / data->G.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelFieldData_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
                                                     CeedOperatorField op_field, CeedQFunctionField qf_field, FieldReuse_Cuda field_reuse,
                                                     CeedInt max_dim, CeedInt Q, CeedInt Q_1d, bool is_input, bool is_all_tensor, bool is_at_points,
                                                     bool use_3d_slices) {
  bool      is_tensor = true;
  CeedBasis basis;
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  if (basis != CEED_BASIS_NONE) CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));

  const char *field_name;
  std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
  std::string option_name = (is_input ?
"inputs" : "outputs");
  CeedEvalMode           eval_mode = CEED_EVAL_NONE;
  CeedInt                elem_size = 0, num_comp = 0, dim = max_dim, P_1d = 0;
  CeedElemRestriction    elem_rstr;
  CeedBasis_Cuda_shared *basis_data;

  // Field reuse info
  bool use_previous_field = field_reuse.index != -1;

  CeedCallBackend(CeedOperatorFieldGetName(op_field, &field_name));
  code << tab << "// -- " << (is_input ? "Input" : "Output") << " field " << i << ": " << field_name << "\n";

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  // NOTE(review): basis_data is only set when basis != CEED_BASIS_NONE; the
  // INTERP/GRAD cases below dereference it, so they presume a real basis
  if (basis != CEED_BASIS_NONE) {
    CeedCallBackend(CeedBasisGetData(basis, &basis_data));
    CeedCallBackend(CeedBasisGetDimension(basis, &dim));
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Set field constants
  code << tab << "const CeedInt dim" << var_suffix << " = " << dim << ";\n";
  if (is_tensor && !is_all_tensor) {
    CeedInt P = 0;

    CeedCallBackend(CeedBasisGetNumNodes(basis, &P));
    code << tab << "const CeedInt P" << var_suffix << " = " << (basis == CEED_BASIS_NONE ? Q : P) << ";\n";
  }
  code << tab << "const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n";
  if (eval_mode != CEED_EVAL_WEIGHT) {
    code << tab << "const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n";
  }

  // Load basis data
  code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
  switch (eval_mode) {
    case CEED_EVAL_NONE:
      break;
    case CEED_EVAL_INTERP:
      if (is_at_points) {
        // AtPoints
        // Lazily build and cache the device Chebyshev interpolation matrix (P_1d x Q_1d)
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallCuda(CeedBasisReturnCeed(basis),
                       cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      if (use_previous_field) {
        // Alias the shared-memory interp matrix of an earlier field with the same basis
        std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

        code << tab << "CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
      } else {
        bool is_collocated = false;

        CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));
        if (is_collocated && !is_at_points) {
          // Collocated nodes: interpolation is the identity, no matrix needed
          code << tab << "CeedScalar *s_B" << var_suffix << " = NULL;\n";
        } else {
          code << tab << "__shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
          code << tab << "LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
        }
      }
      break;
    case CEED_EVAL_GRAD:
      if (is_at_points) {
        // AtPoints
        // Same lazy Chebyshev matrix setup as the INTERP case above
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallCuda(CeedBasisReturnCeed(basis),
                       cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      if (is_tensor) {
        if (use_previous_field) {
          std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << tab << "CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          bool is_collocated = false;

          CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));
          if (is_collocated && !is_at_points) {
            code << tab << "CeedScalar *s_B" << var_suffix << " = NULL;\n";
          } else {
            code << tab << "__shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
            code << tab << "LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
          }
        }
      }
      if (is_at_points) break;  // No G mat for AtPoints
      if (use_3d_slices) {
        // 3D slices strategy uses the collocated gradient matrix (Q_1d x Q_1d)
        if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d;
        else data->G.outputs[i] = basis_data->d_collo_grad_1d;
        if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
          std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
          code << tab << "LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
        }
      } else {
        bool has_collo_grad = basis_data->d_collo_grad_1d;

        if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        if (has_collo_grad) {
          // Collocated gradient matrix is Q_1d x Q_1d
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
            code << tab << "LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
          }
        } else {
          // Standard gradient matrix is P x Q (with a *dim factor for non-tensor bases)
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << tab << "CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            code << tab << "__shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim")
                 << (is_tensor ? "" : var_suffix) << "];\n";
            code << tab << "LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << (is_tensor ? "" : var_suffix) << ">(data, G."
                 << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
          }
        }
      }
      break;
    case CEED_EVAL_WEIGHT:
      break;  // No action
    // LCOV_EXCL_START
    case CEED_EVAL_DIV:
    case CEED_EVAL_CURL:
      break;  // TODO: Not implemented
      // LCOV_EXCL_STOP
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Restriction
//
// Emits the generated-kernel code that gathers one field from its L-vector
// into element registers (inputs) or scatters element registers back to the
// L-vector (outputs), and stashes the device offset indices in data->indices.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelRestriction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
                                                       CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field,
                                                       CeedInt max_dim, CeedInt Q_1d, bool is_input, bool is_all_tensor, bool is_at_points,
                                                       bool use_3d_slices) {
  std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string P_name = (is_all_tensor ?
"P_1d" : "P") + var_suffix;
  CeedEvalMode              eval_mode = CEED_EVAL_NONE;
  CeedInt                   elem_size = 0, num_comp = 0;
  CeedSize                  l_size;
  CeedRestrictionType       rstr_type = CEED_RESTRICTION_STANDARD;
  CeedElemRestriction_Cuda *rstr_data;
  CeedElemRestriction       elem_rstr;

  // Get field data
  // NOTE(review): rstr_data is only set when elem_rstr != CEED_ELEMRESTRICTION_NONE;
  // the STANDARD/POINTS cases below dereference it, so they presume a real restriction
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Restriction
  if (is_input) {
    // Input
    if (field_input_buffer[i] != i) {
      std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]);

      // Restriction was already done for previous input
      code << tab << "CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n";
    } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) {
      if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) {
        // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated
        code << tab << "CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n";
      } else if (rstr_type != CEED_RESTRICTION_POINTS) {
        // Otherwise we're using the scratch space
        code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
      }
      switch (rstr_type) {
        case CEED_RESTRICTION_STANDARD: {
          CeedInt comp_stride;

          CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
          code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          code << tab << "ReadLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", "
               << P_name << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix
               << ");\n";
          break;
        }
        case CEED_RESTRICTION_STRIDED: {
          bool    has_backend_strides;
          CeedInt num_elem;

          CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
          CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
          // Default backend strides; overwritten below when user strides are set
          CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

          if (!has_backend_strides) {
            CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
          }
          code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1]
               << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n";
          code << tab << "ReadLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", strides"
               << var_suffix << "_0, strides" << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, d" << var_suffix << ", r_e"
               << var_suffix << ");\n";
          break;
        }
        case CEED_RESTRICTION_POINTS: {
          CeedInt comp_stride;

          // Point data is read per-point later; only emit the stride and stash indices
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          break;
        }
        // LCOV_EXCL_START
        case CEED_RESTRICTION_ORIENTED:
        case CEED_RESTRICTION_CURL_ORIENTED:
          break;  // TODO: Not implemented
          // LCOV_EXCL_STOP
      }
    }
  } else {
    // Output
    switch (rstr_type) {
      case CEED_RESTRICTION_STANDARD: {
        CeedInt comp_stride;

        CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
        code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
        CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
        code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        code << tab << "WriteLVecStandard" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", "
             << P_name << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix
             << ");\n";
        break;
      }
      case CEED_RESTRICTION_STRIDED: {
        bool    has_backend_strides;
        CeedInt num_elem;

        CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
        CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
        CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

        if (!has_backend_strides) {
          CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
        }
        code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1]
             << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n";
        code << tab << "WriteLVecStrided" << (is_all_tensor ? max_dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", strides"
             << var_suffix << "_0, strides" << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, r_e" << var_suffix << ", d" << var_suffix
             << ");\n";
        break;
      }
      case CEED_RESTRICTION_POINTS:
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        break;
      // LCOV_EXCL_START
      case CEED_RESTRICTION_ORIENTED:
      case CEED_RESTRICTION_CURL_ORIENTED:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Basis
//
// Emits the generated-kernel basis action (interp/grad/weight, forward for
// inputs, transpose for outputs) for one field, selecting the device helper
// by dimension, tensor-ness, collocation, and AtPoints/3D-slices strategy.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelBasis_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt i,
                                                 CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt max_dim, CeedInt Q_1d,
                                                 bool
is_input, bool is_all_tensor, bool is_at_points, bool use_3d_slices) {
  bool      is_tensor = true, is_collocated = true;
  CeedBasis basis;
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
  CeedCallBackend(CeedBasisIsCollocated(basis, &is_collocated));

  std::string         var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string         P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
  CeedEvalMode        eval_mode = CEED_EVAL_NONE;
  CeedInt             dim = max_dim, elem_size = 0, num_comp = 0, P_1d = 0;
  CeedElemRestriction elem_rstr;

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  if (basis != CEED_BASIS_NONE) {
    CeedCallBackend(CeedBasisGetDimension(basis, &dim));
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Basis
  code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n";
  if (is_input) {
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        // No basis action: quadrature values alias the element values
        if (!use_3d_slices && !is_at_points) {
          code << tab << "CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n";
        }
        break;
      case CEED_EVAL_INTERP:
        if (is_at_points) {
          // AtPoints: interpolate to the Chebyshev coefficient registers r_c_*
          std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";

          code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
        } else {
          std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
                                                   std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
                                                : "InterpNonTensor";
          // Flattened tensor bases size the 1D transpose buffer by max(P_1d, Q_1d) instead of OP_T_1D
          std::string op_t_1d_name = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);

          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
               << var_suffix << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_GRAD:
        if (is_at_points) {
          // AtPoints: only interpolate to coefficients here; gradients are taken per point
          std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";

          code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
        } else if (use_3d_slices) {
          // 3D slices: interpolate now, apply the collocated gradient slice-by-slice later
          std::string function_name =
              (dim > 1 ? "InterpTensor" : "Interp") + std::string(is_collocated ? "CollocatedNodes" : "") + std::to_string(dim) + "d";

          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
        } else if (is_tensor) {
          bool        is_collocated_grad = dim == 3 && Q_1d >= P_1d;
          std::string function_name =
              (dim == 1 ? "Grad"
                        : ("GradTensor" + std::string(is_collocated ? "CollocatedNodes" : (is_collocated_grad ? "Collocated" : ""))) +
                              std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"));
          std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);

          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*"
               << (is_all_tensor && dim >= 3 ? Q_name : "1") << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_e"
               << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
        } else {
          std::string function_name = "GradNonTensor";

          code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n";
          code << tab << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
               << ", OP_T_1D>(data, r_e" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_WEIGHT: {
        if (is_at_points) {
          code << tab << "// Nothing to do AtPoints\n";
        } else {
          CeedBasis_Cuda_shared *basis_data;
          std::string function_name = is_tensor
                                          ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
                                          : "WeightNonTensor";

          code << tab << "CeedScalar r_q" << var_suffix << "[" << (is_all_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
          // Stash the quadrature weight device pointer for the kernel argument W
          CeedCallBackend(CeedBasisGetData(basis, &basis_data));
          data->W = basis_data->d_q_weight_1d;
          code << tab << function_name << "<" << P_name << ", " << Q_name << ">(data, W, r_q" << var_suffix << ");\n";
        }
        break;
      }
      // LCOV_EXCL_START
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  } else {
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        code << tab << "CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n";
        break;  // No action
      case CEED_EVAL_INTERP:
        code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
        if (is_at_points) {
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";

          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else {
          std::string function_name =
              is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
                           std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened"))
                        : "InterpTransposeNonTensor";
          std::string op_t_1d_name = (is_all_tensor || !is_tensor) ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);

          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
               << var_suffix << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_GRAD:
        code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
        if (is_at_points) {
          // AtPoints: per-point gradients were already accumulated into r_c_*;
          // only the transpose interpolation back to nodes remains
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";

          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else if (use_3d_slices) {
          // 3D slices: the transpose collocated gradient was applied earlier; only interp^T remains
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::string(is_collocated ? "CollocatedNodes" : "") +
                                      std::to_string(dim) + "d";

          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else if (is_tensor) {
          bool        is_collocated_grad = dim == 3 && Q_1d >= P_1d;
          std::string function_name =
              (dim == 1 ? "GradTranspose"
                        : ("GradTransposeTensor" + std::string(is_collocated ? "CollocatedNodes" : (is_collocated_grad ? "Collocated" : "")))) +
              std::to_string(dim) + "d" + (is_all_tensor ? "" : "Flattened");
          std::string op_t_1d_name = is_all_tensor ? "OP_T_1D" : (P_1d > Q_1d ? P_name : Q_name);

          code << tab << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", " << op_t_1d_name << ">(data, r_q"
               << var_suffix << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else {
          std::string function_name = "GradTransposeNonTensor";

          code << tab << function_name << "<num_comp" << var_suffix << ", dim" << var_suffix << ", " << P_name << ", " << Q_name
               << ", OP_T_1D>(data, r_q" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT:
        break;  // Should not occur
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// QFunction
//
// Emits the quadrature-point loop of the generated kernel: output register
// setup, (for AtPoints) the point-batch loop with coordinate reads, and the
// QFunction call with its input/output marshaling.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelQFunction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, Tab &tab, CeedInt max_dim,
                                                     CeedInt max_num_points, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                                     CeedQFunctionField *qf_input_fields, CeedInt num_output_fields,
                                                     CeedOperatorField *op_output_fields, CeedQFunctionField *qf_output_fields,
                                                     std::string qfunction_name, CeedInt Q_1d, bool is_all_tensor, bool is_at_points,
                                                     bool use_3d_slices) {
  std::string Q_name = is_all_tensor ?
"Q_1d" : "Q"; 694 CeedEvalMode eval_mode = CEED_EVAL_NONE; 695 CeedElemRestriction elem_rstr; 696 697 // Setup output arrays 698 code << "\n"; 699 code << tab << "// -- Output field setup\n"; 700 for (CeedInt i = 0; i < num_output_fields; i++) { 701 const char *field_name; 702 std::string var_suffix = "_out_" + std::to_string(i); 703 704 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 705 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 706 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 707 switch (eval_mode) { 708 case CEED_EVAL_NONE: 709 if (is_at_points) { 710 code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n"; 711 } else { 712 code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1") 713 << "];\n"; 714 } 715 break; 716 case CEED_EVAL_INTERP: 717 if (is_at_points) { 718 // Accumulator for point data 719 code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n"; 720 code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "; i++) r_c" << var_suffix 721 << "[i] = 0.0;\n"; 722 } else { 723 code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_all_tensor && (max_dim >= 3) ? Q_name : "1") 724 << "];\n"; 725 } 726 break; 727 case CEED_EVAL_GRAD: 728 if (is_at_points) { 729 // Accumulator for point data 730 code << tab << "CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? Q_name : "1") << "];\n"; 731 code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (max_dim >= 3 ? 
Q_name : "1") << "; i++) r_c" << var_suffix 732 << "[i] = 0.0;\n"; 733 } else if (use_3d_slices) { 734 // Accumulator for gradient slices 735 code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 736 code << tab << "for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) r_q" << var_suffix << "[i] = 0.0;\n"; 737 } else { 738 code << tab << "CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "*" 739 << (is_all_tensor && (max_dim >= 3) ? Q_name : "1") << "];\n"; 740 } 741 break; 742 case CEED_EVAL_WEIGHT: 743 break; 744 // LCOV_EXCL_START 745 case CEED_EVAL_DIV: 746 case CEED_EVAL_CURL: 747 break; // TODO: Not implemented 748 // LCOV_EXCL_STOP 749 } 750 } 751 752 if (is_at_points) { 753 // We need to handle batches of points 754 code << "\n"; 755 code << tab << "// Note: Using batches of points\n"; 756 code << tab << "const CeedInt point_loop_bound = (blockDim.x*blockDim.y) * ceil((1.0*max_num_points) / (blockDim.x*blockDim.y));\n\n"; 757 code << tab << "#pragma unroll\n"; 758 code << tab << "for (CeedInt i = threadIdx.x + threadIdx.y*blockDim.x; i < point_loop_bound; i += blockDim.x*blockDim.y) {\n"; 759 tab.push(); 760 code << tab << "const CeedInt p = i % max_num_points;\n\n"; 761 762 code << tab << "// -- Coordinates\n"; 763 code << tab << "CeedScalar r_x[max_dim];\n"; 764 code << tab << "ReadPoint<max_dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n"; 765 766 code << tab << "// -- Input fields\n"; 767 for (CeedInt i = 0; i < num_input_fields; i++) { 768 const char *field_name; 769 std::string var_suffix = "_in_" + std::to_string(i); 770 std::string P_name = "P_1d" + var_suffix; 771 772 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 773 code << tab << "// ---- Input field " << i << ": " << field_name << "\n"; 774 
CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 775 // Basis action 776 code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 777 switch (eval_mode) { 778 case CEED_EVAL_NONE: 779 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 780 code << tab << "ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 781 << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 782 break; 783 case CEED_EVAL_INTERP: 784 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 785 code << tab << "InterpAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 786 << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n"; 787 break; 788 case CEED_EVAL_GRAD: 789 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n"; 790 code << tab << "GradAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 791 << ">(data, i, r_c" << var_suffix << ", r_x, r_s" << var_suffix << ");\n"; 792 break; 793 case CEED_EVAL_WEIGHT: 794 code << tab << "CeedScalar r_s" << var_suffix << "[1];\n"; 795 code << tab << "r_s" << var_suffix << "[0] = 1.0;\n"; 796 break; 797 // LCOV_EXCL_START 798 case CEED_EVAL_DIV: 799 case CEED_EVAL_CURL: 800 break; // TODO: Not implemented 801 // LCOV_EXCL_STOP 802 } 803 } 804 code << "\n"; 805 code << tab << "// -- Output fields\n"; 806 for (CeedInt i = 0; i < num_output_fields; i++) { 807 const char *field_name; 808 std::string var_suffix = "_out_" + std::to_string(i); 809 810 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 811 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 812 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 813 // 
Basis action 814 switch (eval_mode) { 815 case CEED_EVAL_NONE: 816 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 817 break; 818 case CEED_EVAL_INTERP: 819 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 820 break; 821 case CEED_EVAL_GRAD: 822 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n"; 823 break; 824 // LCOV_EXCL_START 825 case CEED_EVAL_WEIGHT: 826 break; // Should not occur 827 case CEED_EVAL_DIV: 828 case CEED_EVAL_CURL: 829 break; // TODO: Not implemented 830 // LCOV_EXCL_STOP 831 } 832 } 833 834 } else if (use_3d_slices) { 835 // We treat quadrature points per slice in 3d to save registers 836 code << "\n"; 837 code << tab << "// Note: Using planes of 3D elements\n"; 838 code << tab << "#pragma unroll\n"; 839 code << tab << "for (CeedInt q = 0; q < " << Q_name << "; q++) {\n"; 840 tab.push(); 841 code << tab << "// -- Input fields\n"; 842 for (CeedInt i = 0; i < num_input_fields; i++) { 843 const char *field_name; 844 std::string var_suffix = "_in_" + std::to_string(i); 845 846 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 847 code << tab << "// ---- Input field " << i << ": " << field_name << "\n"; 848 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 849 // Basis action 850 code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 851 switch (eval_mode) { 852 case CEED_EVAL_NONE: 853 bool is_strided; 854 855 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 856 857 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 858 CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided)); 859 if (is_strided) { 860 bool has_backend_strides; 861 CeedInt num_elem, elem_size; 862 863 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 864 
CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 865 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 866 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 867 868 if (!has_backend_strides) { 869 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 870 } 871 code << tab << "const CeedInt strides" << var_suffix << "_0 = " << strides[0] << ", strides" << var_suffix << "_1 = " << strides[1] 872 << ", strides" << var_suffix << "_2 = " << strides[2] << ";\n"; 873 code << tab << "ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", strides" << var_suffix << "_0, strides" 874 << var_suffix << "_1, strides" << var_suffix << "_2>(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n"; 875 } else { 876 CeedSize l_size = 0; 877 CeedInt comp_stride; 878 CeedElemRestriction_Cuda *rstr_data; 879 880 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 881 code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 882 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 883 code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 884 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 885 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 886 code << tab << "ReadEVecSliceStandard3d<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", " << Q_name << ">(data, l_size" 887 << var_suffix << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 888 } 889 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 890 break; 891 case CEED_EVAL_INTERP: 892 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 893 code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n"; 894 tab.push(); 895 code << tab << "r_s" << var_suffix << "[j] = r_q" << var_suffix 
<< "[q + j*" << Q_name << "];\n"; 896 tab.pop(); 897 code << tab << "}\n"; 898 break; 899 case CEED_EVAL_GRAD: 900 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n"; 901 code << tab << "GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_q" << var_suffix << ", s_G" 902 << var_suffix << ", r_s" << var_suffix << ");\n"; 903 break; 904 case CEED_EVAL_WEIGHT: 905 code << tab << "CeedScalar r_s" << var_suffix << "[1];\n"; 906 code << tab << "r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n"; 907 break; 908 // LCOV_EXCL_START 909 case CEED_EVAL_DIV: 910 case CEED_EVAL_CURL: 911 break; // TODO: Not implemented 912 // LCOV_EXCL_STOP 913 } 914 } 915 code << "\n"; 916 code << tab << "// -- Output fields\n"; 917 for (CeedInt i = 0; i < num_output_fields; i++) { 918 const char *field_name; 919 std::string var_suffix = "_out_" + std::to_string(i); 920 921 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 922 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 923 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 924 // Basis action 925 switch (eval_mode) { 926 case CEED_EVAL_NONE: 927 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 928 break; 929 case CEED_EVAL_INTERP: 930 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 931 break; 932 case CEED_EVAL_GRAD: 933 code << tab << "CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim" << var_suffix << "];\n"; 934 break; 935 // LCOV_EXCL_START 936 case CEED_EVAL_WEIGHT: 937 break; // Should not occur 938 case CEED_EVAL_DIV: 939 case CEED_EVAL_CURL: 940 break; // TODO: Not implemented 941 // LCOV_EXCL_STOP 942 } 943 } 944 } else { 945 code << "\n"; 946 code << tab << "// Note: Using full elements\n"; 947 code << tab << "{\n"; 948 tab.push(); 949 code << tab << "// -- 
Input fields\n"; 950 for (CeedInt i = 0; i < num_input_fields; i++) { 951 const char *field_name; 952 953 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 954 code << tab << "// ---- Input field " << i << ": " << field_name << "\n"; 955 code << tab << "CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n"; 956 } 957 code << tab << "// -- Output fields\n"; 958 for (CeedInt i = 0; i < num_output_fields; i++) { 959 const char *field_name; 960 961 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 962 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 963 code << tab << "CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n"; 964 } 965 } 966 967 // Input and output buffers 968 code << "\n"; 969 code << tab << "// -- QFunction inputs and outputs\n"; 970 code << tab << "// ---- Inputs\n"; 971 code << tab << "CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n"; 972 for (CeedInt i = 0; i < num_input_fields; i++) { 973 const char *field_name; 974 975 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 976 code << tab << "// ------ Input field " << i << ": " << field_name << "\n"; 977 code << tab << "inputs[" << i << "] = r_s_in_" << i << ";\n"; 978 } 979 code << tab << "// ---- Outputs\n"; 980 code << tab << "CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n"; 981 for (CeedInt i = 0; i < num_output_fields; i++) { 982 const char *field_name; 983 984 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 985 code << tab << "// ------ Output field " << i << ": " << field_name << "\n"; 986 code << tab << "outputs[" << i << "] = r_s_out_" << i << ";\n"; 987 } 988 989 // Apply QFunction 990 code << "\n"; 991 code << tab << "// -- Apply QFunction\n"; 992 code << tab << "" << qfunction_name << "(ctx, "; 993 if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) { 994 code << "1"; 995 } else { 996 code << Q_name; 
997 } 998 code << ", inputs, outputs);\n"; 999 1000 if (is_at_points) { 1001 // Map back to coefficients 1002 code << "\n"; 1003 code << tab << "// -- Output fields\n"; 1004 for (CeedInt i = 0; i < num_output_fields; i++) { 1005 const char *field_name; 1006 std::string var_suffix = "_out_" + std::to_string(i); 1007 std::string P_name = "P_1d" + var_suffix; 1008 1009 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 1010 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 1011 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 1012 // Basis action 1013 code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 1014 switch (eval_mode) { 1015 case CEED_EVAL_NONE: { 1016 CeedInt comp_stride; 1017 CeedElemRestriction elem_rstr; 1018 1019 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 1020 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 1021 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1022 code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 1023 code << tab << "WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 1024 << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]" 1025 << ", r_s" << var_suffix << ", d" << var_suffix << ");\n"; 1026 break; 1027 } 1028 case CEED_EVAL_INTERP: 1029 code << tab << "if (i >= points.num_per_elem[elem]) {\n"; 1030 tab.push(); 1031 code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 1032 tab.pop(); 1033 code << tab << "}\n"; 1034 code << tab << "InterpTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 1035 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 1036 break; 1037 case CEED_EVAL_GRAD: 1038 code << tab << "if (i >= 
points.num_per_elem[elem]) {\n"; 1039 tab.push(); 1040 code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 1041 tab.pop(); 1042 code << tab << "}\n"; 1043 code << tab << "GradTransposeAtPoints" << max_dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 1044 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 1045 break; 1046 // LCOV_EXCL_START 1047 case CEED_EVAL_WEIGHT: 1048 break; // Should not occur 1049 case CEED_EVAL_DIV: 1050 case CEED_EVAL_CURL: 1051 break; // TODO: Not implemented 1052 // LCOV_EXCL_STOP 1053 } 1054 } 1055 } else if (use_3d_slices) { 1056 // Copy or apply transpose grad, if needed 1057 code << "\n"; 1058 code << tab << "// -- Output fields\n"; 1059 for (CeedInt i = 0; i < num_output_fields; i++) { 1060 const char *field_name; 1061 std::string var_suffix = "_out_" + std::to_string(i); 1062 std::string P_name = "P_1d" + var_suffix; 1063 1064 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 1065 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 1066 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 1067 // Basis action 1068 code << tab << "// EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 1069 switch (eval_mode) { 1070 case CEED_EVAL_NONE: 1071 code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n"; 1072 tab.push(); 1073 code << tab << "r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n"; 1074 tab.pop(); 1075 code << tab << "}\n"; 1076 break; 1077 case CEED_EVAL_INTERP: 1078 code << tab << "for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n"; 1079 tab.push(); 1080 code << tab << "r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n"; 1081 tab.pop(); 1082 code << tab << "}\n"; 1083 break; 1084 case CEED_EVAL_GRAD: 1085 code << 
tab << "GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_s" << var_suffix << ", s_G" 1086 << var_suffix << ", r_q" << var_suffix << ");\n"; 1087 break; 1088 // LCOV_EXCL_START 1089 case CEED_EVAL_WEIGHT: 1090 break; // Should not occur 1091 case CEED_EVAL_DIV: 1092 case CEED_EVAL_CURL: 1093 break; // TODO: Not implemented 1094 // LCOV_EXCL_STOP 1095 } 1096 } 1097 } 1098 tab.pop(); 1099 code << tab << "}\n"; 1100 return CEED_ERROR_SUCCESS; 1101 } 1102 1103 //------------------------------------------------------------------------------ 1104 // Build single operator kernel 1105 //------------------------------------------------------------------------------ 1106 extern "C" int CeedOperatorBuildKernel_Cuda_gen(CeedOperator op, bool *is_good_build) { 1107 bool is_all_tensor = true, is_all_nontensor = true, is_at_points = false, use_3d_slices = false; 1108 Ceed ceed; 1109 CeedInt Q = 0, Q_1d = 0, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0, coords_comp_stride = 0; 1110 CeedQFunctionField *qf_input_fields, *qf_output_fields; 1111 CeedQFunction_Cuda_gen *qf_data; 1112 CeedQFunction qf; 1113 CeedOperatorField *op_input_fields, *op_output_fields; 1114 CeedOperator_Cuda_gen *data; 1115 std::ostringstream code; 1116 Tab tab; 1117 1118 CeedCallBackend(CeedOperatorGetData(op, &data)); 1119 { 1120 bool is_setup_done; 1121 1122 CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done)); 1123 if (is_setup_done) { 1124 *is_good_build = !data->use_fallback; 1125 return CEED_ERROR_SUCCESS; 1126 } 1127 } 1128 1129 // Check field compatibility 1130 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 1131 { 1132 bool has_shared_bases = true; 1133 1134 for (CeedInt i = 0; i < num_input_fields; i++) { 1135 CeedBasis basis; 1136 1137 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 1138 if (basis != CEED_BASIS_NONE) { 1139 bool 
is_tensor = true; 1140 const char *resource; 1141 char *resource_root; 1142 Ceed basis_ceed; 1143 1144 CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor)); 1145 is_all_tensor = is_all_tensor && is_tensor; 1146 is_all_nontensor = is_all_nontensor && !is_tensor; 1147 CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed)); 1148 CeedCallBackend(CeedGetResource(basis_ceed, &resource)); 1149 CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root)); 1150 has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared"); 1151 CeedCallBackend(CeedFree(&resource_root)); 1152 CeedCallBackend(CeedDestroy(&basis_ceed)); 1153 } 1154 CeedCallBackend(CeedBasisDestroy(&basis)); 1155 } 1156 1157 for (CeedInt i = 0; i < num_output_fields; i++) { 1158 CeedBasis basis; 1159 1160 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 1161 if (basis != CEED_BASIS_NONE) { 1162 bool is_tensor = true; 1163 const char *resource; 1164 char *resource_root; 1165 Ceed basis_ceed; 1166 1167 CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor)); 1168 is_all_tensor = is_all_tensor && is_tensor; 1169 is_all_nontensor = is_all_nontensor && !is_tensor; 1170 1171 CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed)); 1172 CeedCallBackend(CeedGetResource(basis_ceed, &resource)); 1173 CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root)); 1174 has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared"); 1175 CeedCallBackend(CeedFree(&resource_root)); 1176 CeedCallBackend(CeedDestroy(&basis_ceed)); 1177 } 1178 CeedCallBackend(CeedBasisDestroy(&basis)); 1179 } 1180 // -- Fallback to ref if not all bases are shared 1181 if (!has_shared_bases) { 1182 *is_good_build = false; 1183 return CEED_ERROR_SUCCESS; 1184 } 1185 } 1186 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1187 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 1188 CeedCallBackend(CeedQFunctionGetData(qf, &qf_data)); 1189 
CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 1190 1191 // Get operator data 1192 CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points)); 1193 { 1194 CeedInt max_P = 0, max_P_1d = 0; 1195 1196 CeedCallBackend(CeedOperatorBuildKernelData_Cuda_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, 1197 op_output_fields, qf_output_fields, &max_P, &max_P_1d, &Q, &Q_1d, &max_dim, &is_all_tensor, 1198 &use_3d_slices)); 1199 data->max_P_1d = is_all_tensor ? max_P_1d : max_P; 1200 } 1201 if (max_dim == 0) max_dim = 1; 1202 data->dim = max_dim; 1203 if (is_at_points) { 1204 CeedElemRestriction_Cuda *rstr_data; 1205 CeedElemRestriction rstr_points = NULL; 1206 1207 CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL)); 1208 CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points)); 1209 CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride)); 1210 CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data)); 1211 data->points.indices = (CeedInt *)rstr_data->d_offsets; 1212 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points)); 1213 } 1214 if (is_at_points) use_3d_slices = false; 1215 if (Q_1d == 0) { 1216 if (is_at_points) Q_1d = max_num_points; 1217 else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d)); 1218 } 1219 if (Q == 0) Q = Q_1d; 1220 data->Q = Q; 1221 data->Q_1d = Q_1d; 1222 1223 // Check for restriction only identity operator 1224 { 1225 bool is_identity_qf; 1226 1227 CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf)); 1228 if (is_identity_qf) { 1229 CeedEvalMode eval_mode_in, eval_mode_out; 1230 1231 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in)); 1232 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out)); 1233 CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND, 
1234 "Backend does not implement restriction only identity operators"); 1235 } 1236 } 1237 1238 // Add atomicAdd function for old NVidia architectures 1239 { 1240 Ceed_Cuda *ceed_data; 1241 struct cudaDeviceProp prop; 1242 1243 CeedCallBackend(CeedGetData(ceed, &ceed_data)); 1244 CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id)); 1245 if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) { 1246 code << tab << "// AtomicAdd fallback source\n"; 1247 code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n"; 1248 } 1249 } 1250 1251 // Load basis source files 1252 if (!is_all_nontensor) { 1253 code << tab << "// Tensor basis source\n"; 1254 code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n"; 1255 } 1256 if (!is_all_tensor) { 1257 code << tab << "// Non-tensor basis source\n"; 1258 code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor-templates.h>\n\n"; 1259 } 1260 if (!is_all_tensor && !is_all_nontensor) { 1261 code << "// Tensor basis source\n"; 1262 code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-flattened-templates.h>\n\n"; 1263 } 1264 if (is_at_points) { 1265 code << "// AtPoints basis source\n"; 1266 code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points-templates.h>\n\n"; 1267 } 1268 code << "// CodeGen operator source\n"; 1269 code << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n"; 1270 1271 // Get QFunction name 1272 std::string qfunction_name(qf_data->qfunction_name); 1273 std::string operator_name; 1274 1275 operator_name = "CeedKernelCudaGenOperator_" + qfunction_name; 1276 1277 // Define CEED_Q_VLA 1278 code << "\n" << tab << "#undef CEED_Q_VLA\n"; 1279 if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) { 1280 code << tab << "#define CEED_Q_VLA 1\n\n"; 1281 } else { 1282 code << tab << "#define CEED_Q_VLA " << Q_1d << "\n\n"; 1283 } 1284 1285 // Add user QFunction source 1286 { 
1287 const char *source_path; 1288 1289 CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path)); 1290 CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file"); 1291 1292 code << tab << "// User QFunction source\n"; 1293 code << tab << "#include \"" << source_path << "\"\n\n"; 1294 } 1295 1296 // Setup 1297 code << "\n" << tab << "// -----------------------------------------------------------------------------\n"; 1298 code << tab << "// Operator Kernel\n"; 1299 code << tab << "// \n"; 1300 code << tab << "// d_[in,out]_i: CeedVector device array\n"; 1301 code << tab << "// r_[in,out]_e_i: Element vector register\n"; 1302 code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n"; 1303 code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n"; 1304 code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n"; 1305 code << tab << "// \n"; 1306 code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n"; 1307 code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n"; 1308 code << tab << "// -----------------------------------------------------------------------------\n"; 1309 code << tab << "extern \"C\" __global__ void " << operator_name 1310 << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda " 1311 "points) {\n"; 1312 tab.push(); 1313 1314 // Scratch buffers 1315 for (CeedInt i = 0; i < num_input_fields; i++) { 1316 CeedEvalMode eval_mode; 1317 1318 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 1319 if (eval_mode != CEED_EVAL_WEIGHT) { // Skip CEED_EVAL_WEIGHT 1320 code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n"; 1321 } 1322 } 1323 for (CeedInt i = 0; i < num_output_fields; i++) { 1324 code << tab << "CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i 
<< "];\n"; 1325 } 1326 1327 code << tab << "const CeedInt max_dim = " << max_dim << ";\n"; 1328 if (!is_all_tensor) { 1329 code << tab << "const CeedInt Q = " << Q << ";\n"; 1330 } 1331 if (!is_all_nontensor) { 1332 code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n"; 1333 } 1334 if (is_at_points) { 1335 code << tab << "const CeedInt max_num_points = " << max_num_points << ";\n"; 1336 code << tab << "const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n"; 1337 } 1338 1339 // Shared data 1340 code << tab << "extern __shared__ CeedScalar slice[];\n"; 1341 code << tab << "SharedData_Cuda data;\n"; 1342 code << tab << "data.t_id_x = threadIdx.x;\n"; 1343 code << tab << "data.t_id_y = threadIdx.y;\n"; 1344 code << tab << "data.t_id_z = threadIdx.z;\n"; 1345 code << tab << "data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n"; 1346 code << tab << "data.slice = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n"; 1347 1348 // -- Determine input mat reuse 1349 FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX]; 1350 1351 for (CeedInt i = 0; i < num_input_fields; i++) { 1352 input_matrix_reuse[i].index = -1; 1353 } 1354 for (CeedInt i = 0; i < num_input_fields; i++) { 1355 bool is_tensor = true; 1356 CeedEvalMode eval_mode_i; 1357 CeedBasis basis_i; 1358 1359 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i)); 1360 if (eval_mode_i == CEED_EVAL_WEIGHT) continue; 1361 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i)); 1362 CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor)); 1363 for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) { 1364 CeedEvalMode eval_mode_j; 1365 CeedBasis basis_j; 1366 1367 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 1368 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1369 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 
      // Re-use is keyed on basis object identity: tensor bases share B/G data
      // across eval modes, while non-tensor matrices are eval-mode specific.
      if (basis_i == basis_j) {
        if (is_tensor) {
          input_matrix_reuse[i].index     = j;
          input_matrix_reuse[i].is_input  = true;
          input_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            input_matrix_reuse[i].index     = j;
            input_matrix_reuse[i].is_input  = true;
            input_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      // Getters return reference-counted objects; each Get is paired with a Destroy
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // -- Determine output mat reuse
  // Each output field searches the input fields first, then earlier output
  // fields, for a basis whose shared-memory matrices it can re-use.
  FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_output_fields; i++) {
    output_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    bool         is_tensor = true;
    CeedEvalMode eval_mode_i;
    CeedBasis    basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
    CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor));
    // First pass: try to re-use an input field's matrices
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;  // Weight fields carry no B/G matrices
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index     = j;
          output_matrix_reuse[i].is_input  = true;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index     = j;
            output_matrix_reuse[i].is_input  = true;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    // Second pass: try to re-use an earlier output field's matrices
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index     = j;
          output_matrix_reuse[i].is_input  = false;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index     = j;
            output_matrix_reuse[i].is_input  = false;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // Initialize constants, and matrices B and G
  code << "\n" << tab << "// Input field constants and basis data\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i],
                                                              max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));
  }
  code << "\n" << tab << "// Output field constants and basis data\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i],
                                                              max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices));
  }

  // Loop over all elements
  // The emitted kernel uses a grid-stride loop over elements: blockDim.z
  // elements per thread block, advancing by gridDim.x*blockDim.z per pass.
  // The __syncthreads() separates the shared-memory basis data setup above
  // from its first use inside the loop.
  code << "\n" << tab << "// Element loop\n";
  code << tab << "__syncthreads();\n";
  code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
  tab.push();

  // -- Compute minimum buffer space needed
  // r_e_scratch must hold the largest e-vector slice of any field:
  // num_comp values per thread, times Q_1d z-layers for 3D tensor kernels.
  CeedInt max_rstr_buffer_size = 1;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) {
      CeedInt             num_comp;
      CeedElemRestriction elem_rstr;

      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
      CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
      max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
      CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_NONE) {
      CeedInt             num_comp;
      CeedElemRestriction elem_rstr;

      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
      CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
      max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1));
      CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
    }
  }
  code << tab << "// Scratch restriction buffer space\n";
  code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";

  // -- Determine best input field processing order
  // Fields sharing both the same CeedVector and the same restriction are
  // grouped consecutively; field_rstr_in_buffer[j] = i records that field j
  // can use the e-vector already loaded for field i.
  CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    field_rstr_in_buffer[i] = -1;
    input_field_order[i]    = -1;
  }
  {
    bool    is_ordered[CEED_FIELD_MAX];
    CeedInt curr_index = 0;

    for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedVector          vec_i;
      CeedElemRestriction rstr_i;

      if (is_ordered[i]) continue;
      field_rstr_in_buffer[i] = i;
      is_ordered[i]           = true;
      input_field_order[curr_index] = i;
      curr_index++;
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
      if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
      for (CeedInt j = i + 1; j < num_input_fields; j++) {
        CeedVector          vec_j;
        CeedElemRestriction rstr_j;

        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
        CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
        if (rstr_i == rstr_j && vec_i == vec_j) {
          field_rstr_in_buffer[j] = i;
          is_ordered[j]           = true;
          input_field_order[curr_index] = j;
          curr_index++;
        }
        CeedCallBackend(CeedVectorDestroy(&vec_j));
        CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
      }
      CeedCallBackend(CeedVectorDestroy(&vec_i));
      CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
    }
  }

  // -- Input restriction and basis
  // Emit, per input field (in the order computed above), the E-vector gather
  // followed by the basis action into quadrature space.
  code << "\n" << tab << "// -- Input field restrictions and basis actions\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char   *field_name;
    const CeedInt f = input_field_order[i];

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
    code << tab << "// ---- Input field " << f << ": " << field_name << "\n";

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f],
                                                                max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices));

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true,
                                                          is_all_tensor, is_at_points, use_3d_slices));
  }

  // -- Q function
  CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields,
                                                            qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name,
                                                            Q_1d, is_all_tensor, is_at_points, use_3d_slices));

  // -- Output basis and restriction
  // Outputs are processed in declaration order (no reordering); the NULL in
  // place of field_rstr_in_buffer means no restriction-buffer sharing here.
  code << "\n" << tab << "// -- Output field basis action and restrictions\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << tab << "// ---- Output field " << i << ": " << field_name << "\n";

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false,
                                                          is_all_tensor, is_at_points, use_3d_slices));

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, i, NULL, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d,
                                                                false, is_all_tensor, is_at_points, use_3d_slices));
  }

  // Close loop and function
  tab.pop();
  code << tab <<
"}\n"; 1591 tab.pop(); 1592 code << tab << "}\n"; 1593 code << tab << "// -----------------------------------------------------------------------------\n\n"; 1594 1595 // Compile 1596 { 1597 bool is_compile_good = false; 1598 const CeedInt T_1d = CeedIntMax(is_all_tensor ? Q_1d : Q, data->max_P_1d); 1599 1600 data->thread_1d = T_1d; 1601 CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, &data->module, 1, "OP_T_1D", T_1d)); 1602 if (is_compile_good) { 1603 *is_good_build = true; 1604 CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, operator_name.c_str(), &data->op)); 1605 } else { 1606 *is_good_build = false; 1607 data->use_fallback = true; 1608 } 1609 } 1610 CeedCallBackend(CeedOperatorSetSetupDone(op)); 1611 CeedCallBackend(CeedDestroy(&ceed)); 1612 CeedCallBackend(CeedQFunctionDestroy(&qf)); 1613 return CEED_ERROR_SUCCESS; 1614 } 1615 1616 //------------------------------------------------------------------------------ 1617 // Build AtPoints assembly operator kernel 1618 //------------------------------------------------------------------------------ 1619 static int CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(CeedOperator op, bool is_full, bool *is_good_build) { 1620 bool is_all_tensor = true, is_at_points = false, use_3d_slices = false; 1621 Ceed ceed; 1622 CeedInt Q, Q_1d, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0, coords_comp_stride = 0; 1623 CeedQFunctionField *qf_input_fields, *qf_output_fields; 1624 CeedQFunction_Cuda_gen *qf_data; 1625 CeedQFunction qf; 1626 CeedOperatorField *op_input_fields, *op_output_fields; 1627 CeedOperator_Cuda_gen *data; 1628 std::ostringstream code; 1629 Tab tab; 1630 1631 // Check compatibility 1632 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1633 CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points)); 1634 CeedCheck(is_at_points, ceed, CEED_ERROR_BACKEND, "Only AtPoints operator assembly supported"); 1635 1636 // Retrieve operator data 1637 
CeedCallBackend(CeedOperatorGetData(op, &data)); 1638 Q = data->Q; 1639 Q_1d = data->Q_1d; 1640 max_dim = data->dim; 1641 { 1642 CeedElemRestriction rstr_points = NULL; 1643 1644 CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL)); 1645 CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points)); 1646 CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride)); 1647 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points)); 1648 } 1649 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 1650 CeedCallBackend(CeedQFunctionGetData(qf, &qf_data)); 1651 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 1652 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 1653 1654 // Add atomicAdd function for old NVidia architectures 1655 { 1656 Ceed_Cuda *ceed_data; 1657 struct cudaDeviceProp prop; 1658 1659 CeedCallBackend(CeedGetData(ceed, &ceed_data)); 1660 CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id)); 1661 if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) { 1662 code << tab << "// AtomicAdd fallback source\n"; 1663 code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n"; 1664 } 1665 } 1666 1667 // Load basis source files 1668 code << tab << "// Tensor basis source\n"; 1669 code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n"; 1670 code << tab << "// AtPoints basis source\n"; 1671 code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points-templates.h>\n\n"; 1672 code << tab << "// CodeGen operator source\n"; 1673 code << tab << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n"; 1674 1675 // Get QFunction name 1676 std::string qfunction_name(qf_data->qfunction_name); 1677 std::string operator_name; 1678 1679 if (is_full) { 1680 operator_name = 
"CeedKernelCudaGenOperatorFullAssembly_" + qfunction_name; 1681 } else { 1682 operator_name = "CeedKernelCudaGenOperatorDiagonalAssembly_" + qfunction_name; 1683 } 1684 1685 // Define CEED_Q_VLA 1686 code << "\n" << tab << "#undef CEED_Q_VLA\n"; 1687 code << tab << "#define CEED_Q_VLA 1\n\n"; 1688 1689 // Add user QFunction source 1690 { 1691 const char *source_path; 1692 1693 CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path)); 1694 CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file"); 1695 1696 code << tab << "// User QFunction source\n"; 1697 code << tab << "#include \"" << source_path << "\"\n\n"; 1698 } 1699 1700 // Setup 1701 code << "\n" << tab << "// -----------------------------------------------------------------------------\n"; 1702 code << tab << "// Operator Assembly Kernel\n"; 1703 code << tab << "// \n"; 1704 code << tab << "// d_[in,out]_i: CeedVector device array\n"; 1705 code << tab << "// r_[in,out]_e_i: Element vector register\n"; 1706 code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n"; 1707 code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n"; 1708 code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n"; 1709 code << tab << "// \n"; 1710 code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n"; 1711 code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n"; 1712 code << tab << "// -----------------------------------------------------------------------------\n"; 1713 code << tab << "extern \"C\" __global__ void " << operator_name 1714 << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda " 1715 "points, CeedScalar *__restrict__ values_array) {\n"; 1716 tab.push(); 1717 1718 // Scratch buffers 1719 for (CeedInt i = 0; i < num_input_fields; i++) { 1720 CeedEvalMode eval_mode; 1721 1722 
CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 1723 if (eval_mode != CEED_EVAL_WEIGHT) { // Skip CEED_EVAL_WEIGHT 1724 code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n"; 1725 } 1726 } 1727 for (CeedInt i = 0; i < num_output_fields; i++) { 1728 code << tab << "CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n"; 1729 } 1730 1731 code << tab << "const CeedInt max_dim = " << max_dim << ";\n"; 1732 code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n"; 1733 code << tab << "const CeedInt max_num_points = " << max_num_points << ";\n"; 1734 code << tab << "const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n"; 1735 1736 // Shared data 1737 code << tab << "extern __shared__ CeedScalar slice[];\n"; 1738 code << tab << "SharedData_Cuda data;\n"; 1739 code << tab << "data.t_id_x = threadIdx.x;\n"; 1740 code << tab << "data.t_id_y = threadIdx.y;\n"; 1741 code << tab << "data.t_id_z = threadIdx.z;\n"; 1742 code << tab << "data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n"; 1743 code << tab << "data.slice = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? 
"" : "*OP_T_1D") << ";\n"; 1744 1745 // -- Determine input mat reuse 1746 FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX]; 1747 1748 for (CeedInt i = 0; i < num_input_fields; i++) { 1749 input_matrix_reuse[i].index = -1; 1750 } 1751 for (CeedInt i = 0; i < num_input_fields; i++) { 1752 CeedEvalMode eval_mode_i; 1753 CeedBasis basis_i; 1754 1755 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i)); 1756 if (eval_mode_i == CEED_EVAL_WEIGHT) continue; 1757 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i)); 1758 for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) { 1759 CeedEvalMode eval_mode_j; 1760 CeedBasis basis_j; 1761 1762 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 1763 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1764 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 1765 if (basis_i == basis_j) { 1766 input_matrix_reuse[i].index = j; 1767 input_matrix_reuse[i].is_input = true; 1768 input_matrix_reuse[i].eval_mode = eval_mode_j; 1769 } 1770 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1771 } 1772 CeedCallBackend(CeedBasisDestroy(&basis_i)); 1773 } 1774 1775 // -- Determine output mat reuse 1776 FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX]; 1777 1778 for (CeedInt i = 0; i < num_output_fields; i++) { 1779 output_matrix_reuse[i].index = -1; 1780 } 1781 for (CeedInt i = 0; i < num_output_fields; i++) { 1782 CeedEvalMode eval_mode_i; 1783 CeedBasis basis_i; 1784 1785 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i)); 1786 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i)); 1787 for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) { 1788 CeedEvalMode eval_mode_j; 1789 CeedBasis basis_j; 1790 1791 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 1792 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1793 
CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 1794 if (basis_i == basis_j) { 1795 output_matrix_reuse[i].index = j; 1796 output_matrix_reuse[i].is_input = true; 1797 output_matrix_reuse[i].eval_mode = eval_mode_j; 1798 } 1799 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1800 } 1801 for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) { 1802 CeedEvalMode eval_mode_j; 1803 CeedBasis basis_j; 1804 1805 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j)); 1806 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1807 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j)); 1808 if (basis_i == basis_j) { 1809 output_matrix_reuse[i].index = j; 1810 output_matrix_reuse[i].is_input = false; 1811 output_matrix_reuse[i].eval_mode = eval_mode_j; 1812 } 1813 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1814 } 1815 CeedCallBackend(CeedBasisDestroy(&basis_i)); 1816 } 1817 1818 // Initialize constants, and matrices B and G 1819 code << "\n" << tab << "// Input field constants and basis data\n"; 1820 for (CeedInt i = 0; i < num_input_fields; i++) { 1821 CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], 1822 max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices)); 1823 } 1824 code << "\n" << tab << "// Output field constants and basis data\n"; 1825 for (CeedInt i = 0; i < num_output_fields; i++) { 1826 CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], 1827 max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices)); 1828 } 1829 1830 // Loop over all elements 1831 code << "\n" << tab << "// Element loop\n"; 1832 code << tab << "__syncthreads();\n"; 1833 code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n"; 1834 
tab.push(); 1835 1836 // -- Compute minimum buffer space needed 1837 CeedInt max_rstr_buffer_size = 1; 1838 1839 for (CeedInt i = 0; i < num_input_fields; i++) { 1840 CeedEvalMode eval_mode; 1841 1842 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 1843 if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) { 1844 CeedInt num_comp; 1845 CeedElemRestriction elem_rstr; 1846 1847 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 1848 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 1849 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1)); 1850 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1851 } 1852 } 1853 for (CeedInt i = 0; i < num_output_fields; i++) { 1854 CeedEvalMode eval_mode; 1855 1856 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 1857 if (eval_mode != CEED_EVAL_NONE) { 1858 CeedInt num_comp; 1859 CeedElemRestriction elem_rstr; 1860 1861 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 1862 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 1863 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? 
Q_1d : 1)); 1864 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1865 } 1866 } 1867 code << tab << "// Scratch restriction buffer space\n"; 1868 code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n"; 1869 1870 // -- Determine best input field processing order 1871 CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX]; 1872 1873 for (CeedInt i = 0; i < num_input_fields; i++) { 1874 field_rstr_in_buffer[i] = -1; 1875 input_field_order[i] = -1; 1876 } 1877 { 1878 bool is_ordered[CEED_FIELD_MAX]; 1879 CeedInt curr_index = 0; 1880 1881 for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false; 1882 for (CeedInt i = 0; i < num_input_fields; i++) { 1883 CeedVector vec_i; 1884 CeedElemRestriction rstr_i; 1885 1886 if (is_ordered[i]) continue; 1887 field_rstr_in_buffer[i] = i; 1888 is_ordered[i] = true; 1889 input_field_order[curr_index] = i; 1890 curr_index++; 1891 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i)); 1892 if (vec_i == CEED_VECTOR_NONE) continue; // CEED_EVAL_WEIGHT 1893 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i)); 1894 for (CeedInt j = i + 1; j < num_input_fields; j++) { 1895 CeedVector vec_j; 1896 CeedElemRestriction rstr_j; 1897 1898 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j)); 1899 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j)); 1900 if (rstr_i == rstr_j && vec_i == vec_j) { 1901 field_rstr_in_buffer[j] = i; 1902 is_ordered[j] = true; 1903 input_field_order[curr_index] = j; 1904 curr_index++; 1905 } 1906 CeedCallBackend(CeedVectorDestroy(&vec_j)); 1907 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j)); 1908 } 1909 CeedCallBackend(CeedVectorDestroy(&vec_i)); 1910 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i)); 1911 } 1912 } 1913 1914 // -- Input restriction and basis 1915 code << "\n" << tab << "// -- Input field restrictions and basis actions\n"; 1916 CeedInt 
active_field_index = -1; 1917 1918 for (CeedInt i = 0; i < num_input_fields; i++) { 1919 bool is_active = false; 1920 const char *field_name; 1921 const CeedInt f = input_field_order[i]; 1922 1923 { 1924 CeedVector vec; 1925 1926 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec)); 1927 is_active = vec == CEED_VECTOR_ACTIVE; 1928 CeedCallBackend(CeedVectorDestroy(&vec)); 1929 } 1930 1931 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name)); 1932 code << tab << "// ---- Input field " << f << ": " << field_name << "\n"; 1933 1934 if (is_active) { 1935 std::string var_suffix = "_in_" + std::to_string(f); 1936 1937 code << tab << "// Active field - no restriction or basis action here\n"; 1938 if (active_field_index == -1) { 1939 active_field_index = f; 1940 code << tab << "CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << (max_dim >= 3 ? "P_1d" + var_suffix : "1") 1941 << "] = {0.0};\n"; 1942 } else { 1943 code << tab << "CeedScalar *r_e" << var_suffix << " = r_e_in_" << active_field_index << ";\n"; 1944 } 1945 } else { 1946 // ---- Restriction 1947 CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], 1948 max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices)); 1949 1950 // ---- Basis action 1951 CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true, 1952 is_all_tensor, is_at_points, use_3d_slices)); 1953 } 1954 } 1955 1956 // -- Loop over active field 1957 std::string active_var_suffix = "_in_" + std::to_string(active_field_index); 1958 1959 code << "\n" << tab << "// Loop over nodes in active field\n"; 1960 code << tab << "for (CeedInt n = 0; n < num_comp" << active_var_suffix << "*P_1d" << active_var_suffix 1961 << (max_dim > 1 ? "*P_1d" + active_var_suffix : "") << (max_dim > 2 ? 
"*P_1d" + active_var_suffix : "") << "; n++) {\n"; 1962 tab.push(); 1963 1964 // -- Set current active node and component to 1 1965 code << tab << "// Set current active node and component to 1.0\n"; 1966 code << tab << "SetEVecStandard" << max_dim << "d_Single<num_comp" << active_var_suffix << ", P_1d" << active_var_suffix << ">(data, n, 1.0, r_e" 1967 << active_var_suffix << ");\n\n"; 1968 1969 for (CeedInt i = 0; i < num_input_fields; i++) { 1970 bool is_active = false; 1971 const char *field_name; 1972 const CeedInt f = input_field_order[i]; 1973 1974 { 1975 CeedVector vec; 1976 1977 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec)); 1978 is_active = vec == CEED_VECTOR_ACTIVE; 1979 CeedCallBackend(CeedVectorDestroy(&vec)); 1980 } 1981 if (!is_active) continue; 1982 1983 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name)); 1984 code << tab << "// ---- Input field " << f << ": " << field_name << "\n"; 1985 1986 // ---- Basis action 1987 CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true, 1988 is_all_tensor, is_at_points, use_3d_slices)); 1989 } 1990 1991 // -- Q function 1992 CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields, 1993 qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name, 1994 Q_1d, is_all_tensor, is_at_points, use_3d_slices)); 1995 1996 // -- Output basis and restriction 1997 code << "\n" << tab << "// -- Output field basis action and restrictions\n"; 1998 for (CeedInt i = 0; i < num_output_fields; i++) { 1999 bool is_active = false; 2000 const char *field_name; 2001 2002 { 2003 CeedVector vec; 2004 2005 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec)); 2006 is_active = vec == CEED_VECTOR_ACTIVE; 2007 CeedCallBackend(CeedVectorDestroy(&vec)); 2008 } 2009 if (!is_active) continue; 2010 2011 
CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 2012 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 2013 2014 // ---- Basis action 2015 CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], max_dim, Q_1d, false, 2016 is_all_tensor, is_at_points, use_3d_slices)); 2017 2018 // ---- Restriction 2019 if (is_full) { 2020 std::string var_suffix = "_out_" + std::to_string(i); 2021 CeedInt comp_stride; 2022 CeedSize l_size; 2023 CeedElemRestriction elem_rstr; 2024 2025 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 2026 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 2027 code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 2028 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 2029 code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 2030 code << tab << "WriteLVecStandard" << max_dim << "d_Assembly<num_comp" << var_suffix << ", comp_stride" << var_suffix << ", P_1d" + var_suffix 2031 << ">(data, l_size" << var_suffix << ", elem, n, r_e" << var_suffix << ", values_array);\n"; 2032 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 2033 } else { 2034 std::string var_suffix = "_out_" + std::to_string(i); 2035 CeedInt comp_stride; 2036 CeedSize l_size; 2037 CeedElemRestriction elem_rstr; 2038 2039 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 2040 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 2041 code << tab << "const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 2042 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 2043 code << tab << "const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 2044 code << tab << "WriteLVecStandard" << max_dim << "d_Single<num_comp" << 
var_suffix << ", comp_stride" << var_suffix << ", P_1d" + var_suffix 2045 << ">(data, l_size" << var_suffix << ", elem, n, indices.outputs[" << i << "], r_e" << var_suffix << ", values_array);\n"; 2046 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 2047 } 2048 } 2049 2050 // -- Reset current active node and component 2051 code << "\n" << tab << "// Reset current active node and component to 0.0\n"; 2052 code << tab << "SetEVecStandard" << max_dim << "d_Single<num_comp" << active_var_suffix << ", P_1d" << active_var_suffix << ">(data, n, 0.0, r_e" 2053 << active_var_suffix << ");\n"; 2054 2055 // -- End of loop over active field 2056 tab.pop(); 2057 code << tab << "}\n"; 2058 2059 // Close loop and function 2060 tab.pop(); 2061 code << tab << "}\n"; 2062 tab.pop(); 2063 code << tab << "}\n"; 2064 code << tab << "// -----------------------------------------------------------------------------\n\n"; 2065 2066 // Compile 2067 { 2068 bool is_compile_good = false; 2069 const CeedInt T_1d = CeedIntMax(is_all_tensor ? Q_1d : Q, data->max_P_1d); 2070 2071 data->thread_1d = T_1d; 2072 CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, 2073 is_full ? &data->module_assemble_full : &data->module_assemble_diagonal, 1, "OP_T_1D", T_1d)); 2074 if (is_compile_good) { 2075 *is_good_build = true; 2076 CeedCallBackend(CeedGetKernel_Cuda(ceed, is_full ? data->module_assemble_full : data->module_assemble_diagonal, operator_name.c_str(), 2077 is_full ? 
&data->assemble_full : &data->assemble_diagonal)); 2078 } else { 2079 *is_good_build = false; 2080 data->use_assembly_fallback = true; 2081 } 2082 } 2083 CeedCallBackend(CeedDestroy(&ceed)); 2084 CeedCallBackend(CeedQFunctionDestroy(&qf)); 2085 return CEED_ERROR_SUCCESS; 2086 } 2087 2088 extern "C" int CeedOperatorBuildKernelDiagonalAssemblyAtPoints_Cuda_gen(CeedOperator op, bool *is_good_build) { 2089 return CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(op, false, is_good_build); 2090 } 2091 2092 extern "C" int CeedOperatorBuildKernelFullAssemblyAtPoints_Cuda_gen(CeedOperator op, bool *is_good_build) { 2093 return CeedOperatorBuildKernelAssemblyAtPoints_Cuda_gen(op, true, is_good_build); 2094 } 2095 2096 //------------------------------------------------------------------------------ 2097 // Build QFunction assembly operator kernel 2098 //------------------------------------------------------------------------------ 2099 extern "C" int CeedOperatorBuildKernelLinearAssembleQFunction_Cuda_gen(CeedOperator op, bool *is_good_build) { 2100 bool is_all_tensor = true, is_all_nontensor = true, is_at_points = false, use_3d_slices = false; 2101 Ceed ceed; 2102 CeedInt Q, Q_1d, num_input_fields, num_output_fields, max_dim = 1, max_num_points = 0; 2103 CeedQFunctionField *qf_input_fields, *qf_output_fields; 2104 CeedQFunction_Cuda_gen *qf_data; 2105 CeedQFunction qf; 2106 CeedOperatorField *op_input_fields, *op_output_fields; 2107 CeedOperator_Cuda_gen *data; 2108 std::ostringstream code; 2109 Tab tab; 2110 2111 // Check compatibility 2112 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 2113 CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points)); 2114 CeedCheck(!is_at_points, ceed, CEED_ERROR_BACKEND, "AtPoints QFunction assembly is not supported"); 2115 2116 // Check field compatibility 2117 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 2118 { 2119 bool has_shared_bases = true; 2120 2121 for 
 (CeedInt i = 0; i < num_input_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool        is_tensor = true;
        const char *resource;
        char       *resource_root;
        Ceed        basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor    = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;
        // Check that the basis was created by the /gpu/cuda/shared backend
        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }

    // Same classification for output field bases
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool        is_tensor = true;
        const char *resource;
        char       *resource_root;
        Ceed        basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor    = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;

        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }
    // NOTE(review): has_shared_bases is computed but not consumed within this
    // view — presumably checked further below; confirm against the full file.
  }

  // Retrieve operator data
  CeedCallBackend(CeedOperatorGetData(op, &data));
  Q       = data->Q;
  Q_1d    = data->Q_1d;
  max_dim = data->dim;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Add atomicAdd function for old NVidia architectures
  // Native double-precision atomicAdd requires SM60+; pre-Pascal devices get a
  // software fallback when CeedScalar is not single precision.
  {
    Ceed_Cuda            *ceed_data;
    struct cudaDeviceProp prop;

    CeedCallBackend(CeedGetData(ceed, &ceed_data));
    CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
    if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
      code << tab << "// AtomicAdd fallback source\n";
      code << tab << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n";
    }
  }

  // Load basis source files
  // Only the template headers actually needed for the basis mix are included.
  if (!is_all_nontensor) {
    code << tab << "// Tensor basis source\n";
    code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n";
  }
  if (!is_all_tensor) {
    code << tab << "// Non-tensor basis source\n";
    code << tab << "#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor-templates.h>\n\n";
  }
  // NOTE(review): the four emissions below omit "<< tab" unlike the blocks
  // above — harmless while tab is at depth 0, but inconsistent; confirm intended.
  if (!is_all_tensor && !is_all_nontensor) {
    code << "// Tensor basis source\n";
    code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-flattened-templates.h>\n\n";
  }
  code << "// CodeGen operator source\n";
  code << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n";

  // Get QFunction name
  std::string qfunction_name(qf_data->qfunction_name);
  std::string operator_name;

  operator_name = "CeedKernelCudaGenQFunctionAssembly_" + qfunction_name;

  // Define CEED_Q_VLA
  // 3D tensor kernels without slicing process Q_1d quadrature points per
  // thread, so the QFunction's VLA macro must match; otherwise one point.
  code << "\n" << tab << "#undef CEED_Q_VLA\n";
  if (max_dim != 3 || is_at_points || use_3d_slices || !is_all_tensor) {
    code << tab << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << tab << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  // Add user QFunction source
  {
    const char *source_path;

    CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
    CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file");

    code << tab << "// User QFunction source\n";
    code << tab << "#include \"" << source_path << "\"\n\n";
  }

  // Setup
  // Emit the kernel header comment (naming conventions for generated
  // registers/shared arrays) and the kernel signature.
  code << "\n" << tab << "// -----------------------------------------------------------------------------\n";
  code << tab << "// Operator Assembly Kernel\n";
  code << tab << "// \n";
  code << tab << "// d_[in,out]_i: CeedVector device array\n";
  code << tab << "// r_[in,out]_e_i: Element vector register\n";
  code << tab << "// r_[in,out]_q_i: Quadrature space vector register\n";
  code << tab << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
  code << tab << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
  code << tab << "// \n";
  code << tab << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
  code << tab << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
  code << tab << "// -----------------------------------------------------------------------------\n";
  code << tab << "extern \"C\" __global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda "
          "points, CeedScalar *__restrict__ values_array) {\n";
  tab.push();

  // Scratch buffers
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << tab << "const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << tab << "CeedScalar *__restrict__ d_out_" <<
i << " = fields.outputs[" << i << "];\n"; 2261 } 2262 2263 code << tab << "const CeedInt max_dim = " << max_dim << ";\n"; 2264 if (!is_all_tensor) { 2265 code << tab << "const CeedInt Q = " << Q << ";\n"; 2266 } 2267 if (!is_all_nontensor) { 2268 code << tab << "const CeedInt Q_1d = " << Q_1d << ";\n"; 2269 } 2270 2271 // Shared data 2272 code << tab << "extern __shared__ CeedScalar slice[];\n"; 2273 code << tab << "SharedData_Cuda data;\n"; 2274 code << tab << "data.t_id_x = threadIdx.x;\n"; 2275 code << tab << "data.t_id_y = threadIdx.y;\n"; 2276 code << tab << "data.t_id_z = threadIdx.z;\n"; 2277 code << tab << "data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n"; 2278 code << tab << "data.slice = slice + data.t_id_z*OP_T_1D" << ((!is_all_tensor || max_dim == 1) ? "" : "*OP_T_1D") << ";\n"; 2279 2280 // -- Determine input mat reuse 2281 FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX]; 2282 2283 for (CeedInt i = 0; i < num_input_fields; i++) { 2284 input_matrix_reuse[i].index = -1; 2285 } 2286 for (CeedInt i = 0; i < num_input_fields; i++) { 2287 bool is_tensor = true; 2288 CeedEvalMode eval_mode_i; 2289 CeedBasis basis_i; 2290 2291 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i)); 2292 if (eval_mode_i == CEED_EVAL_WEIGHT) continue; 2293 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i)); 2294 CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor)); 2295 for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) { 2296 CeedEvalMode eval_mode_j; 2297 CeedBasis basis_j; 2298 2299 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 2300 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 2301 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 2302 if (basis_i == basis_j) { 2303 if (is_tensor) { 2304 input_matrix_reuse[i].index = j; 2305 input_matrix_reuse[i].is_input = true; 2306 input_matrix_reuse[i].eval_mode = 
eval_mode_j; 2307 } else { 2308 // For non-tensor can only re-use with the same eval mode 2309 if (eval_mode_i == eval_mode_j) { 2310 input_matrix_reuse[i].index = j; 2311 input_matrix_reuse[i].is_input = true; 2312 input_matrix_reuse[i].eval_mode = eval_mode_j; 2313 } 2314 } 2315 } 2316 CeedCallBackend(CeedBasisDestroy(&basis_j)); 2317 } 2318 CeedCallBackend(CeedBasisDestroy(&basis_i)); 2319 } 2320 2321 // -- Determine output mat reuse 2322 FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX]; 2323 2324 for (CeedInt i = 0; i < num_output_fields; i++) { 2325 output_matrix_reuse[i].index = -1; 2326 } 2327 for (CeedInt i = 0; i < num_output_fields; i++) { 2328 bool is_tensor = true; 2329 CeedEvalMode eval_mode_i; 2330 CeedBasis basis_i; 2331 2332 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i)); 2333 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i)); 2334 CeedCallBackend(CeedBasisIsTensor(basis_i, &is_tensor)); 2335 for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) { 2336 CeedEvalMode eval_mode_j; 2337 CeedBasis basis_j; 2338 2339 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 2340 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 2341 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 2342 if (basis_i == basis_j) { 2343 if (is_tensor) { 2344 output_matrix_reuse[i].index = j; 2345 output_matrix_reuse[i].is_input = true; 2346 output_matrix_reuse[i].eval_mode = eval_mode_j; 2347 } else { 2348 // For non-tensor can only re-use with the same eval mode 2349 if (eval_mode_i == eval_mode_j) { 2350 output_matrix_reuse[i].index = j; 2351 output_matrix_reuse[i].is_input = true; 2352 output_matrix_reuse[i].eval_mode = eval_mode_j; 2353 } 2354 } 2355 } 2356 CeedCallBackend(CeedBasisDestroy(&basis_j)); 2357 } 2358 for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) { 2359 CeedEvalMode eval_mode_j; 2360 
CeedBasis basis_j; 2361 2362 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j)); 2363 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 2364 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j)); 2365 if (basis_i == basis_j) { 2366 if (is_tensor) { 2367 output_matrix_reuse[i].index = j; 2368 output_matrix_reuse[i].is_input = false; 2369 output_matrix_reuse[i].eval_mode = eval_mode_j; 2370 } else { 2371 // For non-tensor can only re-use with the same eval mode 2372 if (eval_mode_i == eval_mode_j) { 2373 output_matrix_reuse[i].index = j; 2374 output_matrix_reuse[i].is_input = false; 2375 output_matrix_reuse[i].eval_mode = eval_mode_j; 2376 } 2377 } 2378 } 2379 CeedCallBackend(CeedBasisDestroy(&basis_j)); 2380 } 2381 CeedCallBackend(CeedBasisDestroy(&basis_i)); 2382 } 2383 2384 // Initialize constants, and matrices B and G 2385 code << "\n" << tab << "// Input field constants and basis data\n"; 2386 for (CeedInt i = 0; i < num_input_fields; i++) { 2387 CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], 2388 max_dim, Q, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices)); 2389 } 2390 code << "\n" << tab << "// Output field constants and basis data\n"; 2391 for (CeedInt i = 0; i < num_output_fields; i++) { 2392 CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, tab, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], 2393 max_dim, Q, Q_1d, false, is_all_tensor, is_at_points, use_3d_slices)); 2394 } 2395 2396 // Loop over all elements 2397 code << "\n" << tab << "// Element loop\n"; 2398 code << tab << "__syncthreads();\n"; 2399 code << tab << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n"; 2400 tab.push(); 2401 2402 // -- Compute minimum buffer space needed 2403 CeedInt max_rstr_buffer_size = 1; 2404 2405 for (CeedInt i = 0; i < 
num_input_fields; i++) { 2406 CeedEvalMode eval_mode; 2407 2408 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 2409 if (eval_mode != CEED_EVAL_NONE && eval_mode != CEED_EVAL_WEIGHT) { 2410 CeedInt num_comp; 2411 CeedElemRestriction elem_rstr; 2412 2413 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 2414 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 2415 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? Q_1d : 1)); 2416 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 2417 } 2418 } 2419 for (CeedInt i = 0; i < num_output_fields; i++) { 2420 CeedEvalMode eval_mode; 2421 2422 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 2423 if (eval_mode != CEED_EVAL_NONE) { 2424 CeedInt num_comp; 2425 CeedElemRestriction elem_rstr; 2426 2427 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 2428 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 2429 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_all_tensor && (max_dim >= 3) ? 
Q_1d : 1)); 2430 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 2431 } 2432 } 2433 code << tab << "// Scratch restriction buffer space\n"; 2434 code << tab << "CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n"; 2435 2436 // -- Determine best input field processing order 2437 CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX]; 2438 2439 for (CeedInt i = 0; i < num_input_fields; i++) { 2440 field_rstr_in_buffer[i] = -1; 2441 input_field_order[i] = -1; 2442 } 2443 { 2444 bool is_ordered[CEED_FIELD_MAX]; 2445 CeedInt curr_index = 0; 2446 2447 for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false; 2448 for (CeedInt i = 0; i < num_input_fields; i++) { 2449 CeedVector vec_i; 2450 CeedElemRestriction rstr_i; 2451 2452 if (is_ordered[i]) continue; 2453 field_rstr_in_buffer[i] = i; 2454 is_ordered[i] = true; 2455 input_field_order[curr_index] = i; 2456 curr_index++; 2457 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i)); 2458 if (vec_i == CEED_VECTOR_NONE) continue; // CEED_EVAL_WEIGHT 2459 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i)); 2460 for (CeedInt j = i + 1; j < num_input_fields; j++) { 2461 CeedVector vec_j; 2462 CeedElemRestriction rstr_j; 2463 2464 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j)); 2465 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j)); 2466 if (rstr_i == rstr_j && vec_i == vec_j) { 2467 field_rstr_in_buffer[j] = i; 2468 is_ordered[j] = true; 2469 input_field_order[curr_index] = j; 2470 curr_index++; 2471 } 2472 CeedCallBackend(CeedVectorDestroy(&vec_j)); 2473 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j)); 2474 } 2475 CeedCallBackend(CeedVectorDestroy(&vec_i)); 2476 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i)); 2477 } 2478 } 2479 2480 // -- Input restriction and basis 2481 code << "\n" << tab << "// -- Input field restrictions and basis actions\n"; 2482 CeedInt 
num_active_in = 0, num_active_out = 0, qf_assembly_size_out = 0; 2483 CeedInt active_fields_in[CEED_FIELD_MAX], active_fields_out[CEED_FIELD_MAX]; 2484 2485 for (CeedInt i = 0; i < num_input_fields; i++) { 2486 bool is_active = false; 2487 const char *field_name; 2488 const CeedInt f = input_field_order[i]; 2489 2490 { 2491 CeedVector vec; 2492 2493 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[f], &vec)); 2494 is_active = vec == CEED_VECTOR_ACTIVE; 2495 CeedCallBackend(CeedVectorDestroy(&vec)); 2496 } 2497 2498 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name)); 2499 code << tab << "// ---- Input field " << f << ": " << field_name << "\n"; 2500 2501 if (is_active) { 2502 CeedEvalMode eval_mode; 2503 CeedInt field_size; 2504 2505 active_fields_in[num_active_in] = f; 2506 num_active_in++; 2507 CeedCallBackend(CeedQFunctionFieldGetSize(qf_input_fields[f], &field_size)); 2508 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[f], &eval_mode)); 2509 if (eval_mode == CEED_EVAL_GRAD) { 2510 code << tab << "CeedScalar r_q_in_" << f << "[num_comp_in_" << f << "*" << "dim_in_" << f << "*" 2511 << (is_all_tensor && (max_dim >= 3) ? "Q_1d" : "1") << "] = {0.};\n"; 2512 } else { 2513 code << tab << "CeedScalar r_q_in_" << f << "[num_comp_in_" << f << "*" << (is_all_tensor && (max_dim >= 3) ? 
"Q_1d" : "1") << "] = {0.};\n"; 2514 } 2515 code << tab << "const CeedInt field_size_in_" << f << " = " << field_size << ";\n"; 2516 } else { 2517 // ---- Restriction 2518 CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, tab, f, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], 2519 max_dim, Q_1d, true, is_all_tensor, is_at_points, use_3d_slices)); 2520 2521 // ---- Basis action 2522 CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, tab, f, op_input_fields[f], qf_input_fields[f], max_dim, Q_1d, true, 2523 is_all_tensor, is_at_points, use_3d_slices)); 2524 } 2525 } 2526 code << tab << "const CeedInt field_sizes_in[" << num_active_in << "] = {"; 2527 for (CeedInt i = 0; i < num_active_in; i++) { 2528 code << "field_size_in_" << active_fields_in[i] << (i < num_active_in - 1 ? ", " : ""); 2529 } 2530 code << "};\n"; 2531 code << tab << "CeedScalar * r_q_in[" << num_active_in << "] = {"; 2532 for (CeedInt i = 0; i < num_active_in; i++) { 2533 code << "r_q_in_" << active_fields_in[i] << (i < num_active_in - 1 ? 
", " : ""); 2534 } 2535 code << "};\n"; 2536 2537 for (CeedInt i = 0; i < num_output_fields; i++) { 2538 bool is_active = false; 2539 2540 { 2541 CeedVector vec; 2542 2543 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec)); 2544 is_active = vec == CEED_VECTOR_ACTIVE; 2545 CeedCallBackend(CeedVectorDestroy(&vec)); 2546 } 2547 if (is_active) { 2548 const char *field_name; 2549 CeedInt field_size; 2550 2551 active_fields_out[num_active_out] = i; 2552 num_active_out++; 2553 CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &field_size)); 2554 qf_assembly_size_out += field_size; 2555 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 2556 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 2557 code << tab << "const CeedInt field_size_out_" << i << " = " << field_size << ";\n"; 2558 } 2559 } 2560 code << tab << "const CeedInt field_sizes_out[" << num_active_out << "] = {"; 2561 for (CeedInt i = 0; i < num_active_out; i++) { 2562 code << "field_size_out_" << active_fields_out[i] << (i < num_active_out - 1 ? 
", " : ""); 2563 } 2564 code << "};\n"; 2565 code << tab << "const CeedInt total_size_out = " << qf_assembly_size_out << ";\n"; 2566 2567 // -- Loop over active field 2568 code << "\n" << tab << "CeedInt input_offset = 0;\n"; 2569 code << tab << "// Loop over active QFunction input fields\n"; 2570 code << tab << "const CeedInt num_active_in = " << num_active_in << ";\n"; 2571 code << tab << "for (CeedInt a = 0; a < num_active_in; a++) {\n"; 2572 tab.push(); 2573 2574 // -- Loop over size of active field 2575 code << "\n" << tab << "// Loop over current active input field size\n"; 2576 code << tab << "const CeedInt field_size_in = field_sizes_in[a];\n"; 2577 code << tab << "for (CeedInt s = 0; s < field_size_in; s++) {\n"; 2578 tab.push(); 2579 2580 // -- Set current active point and component to 1 2581 code << tab << "// Set current active point and component to 1.0\n"; 2582 if (is_all_tensor && (max_dim >= 3)) { 2583 code << tab << "for (CeedInt i = 0; i < Q_1d; i++) r_q_in[a][i + s * Q_1d] = 1.0;\n"; 2584 } else { 2585 code << tab << "r_q_in[a][s] = 1.0;\n"; 2586 } 2587 2588 // -- Q function 2589 CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, tab, max_dim, max_num_points, num_input_fields, op_input_fields, 2590 qf_input_fields, num_output_fields, op_output_fields, qf_output_fields, qfunction_name, 2591 Q_1d, is_all_tensor, is_at_points, use_3d_slices)); 2592 2593 // -- Output basis and restriction 2594 code << "\n" << tab << "// -- Output field basis action and restrictions\n"; 2595 CeedScalar offset = 0; 2596 2597 for (CeedInt i = 0; i < num_output_fields; i++) { 2598 bool is_active = false; 2599 const char *field_name; 2600 2601 { 2602 CeedVector vec; 2603 2604 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec)); 2605 is_active = vec == CEED_VECTOR_ACTIVE; 2606 CeedCallBackend(CeedVectorDestroy(&vec)); 2607 } 2608 if (!is_active) continue; 2609 2610 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], 
&field_name)); 2611 code << tab << "// ---- Output field " << i << ": " << field_name << "\n"; 2612 2613 // ---- Restriction 2614 CeedInt field_size; 2615 2616 code << tab << "WriteLVecStandard" << (is_all_tensor ? max_dim : 1) << "d_QFAssembly<total_size_out, field_size_out_" << i << ", " 2617 << (is_all_tensor ? "Q_1d" : "Q") << ">(data, num_elem, elem, input_offset + s, " << offset << ", r_q_out_" << i << ", values_array);\n"; 2618 CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &field_size)); 2619 offset += field_size; 2620 } 2621 2622 // -- Reset current active node and component 2623 code << "\n" << tab << "// Reset current active node and component to 0.0\n"; 2624 if (is_all_tensor && (max_dim >= 3)) { 2625 code << tab << "for (CeedInt i = 0; i < Q_1d; i++) r_q_in[a][i + s * Q_1d] = 0.0;\n"; 2626 } else { 2627 code << tab << "r_q_in[a][s] = 0.0;\n"; 2628 } 2629 2630 // -- End of loop over size of active field 2631 tab.pop(); 2632 code << tab << "}\n"; 2633 code << tab << "input_offset += field_size_in;\n"; 2634 2635 // -- End of loop over active field 2636 tab.pop(); 2637 code << tab << "}\n"; 2638 2639 // Close loop and function 2640 tab.pop(); 2641 code << tab << "}\n"; 2642 tab.pop(); 2643 code << tab << "}\n"; 2644 code << tab << "// -----------------------------------------------------------------------------\n\n"; 2645 2646 // Compile 2647 { 2648 bool is_compile_good = false; 2649 const CeedInt T_1d = CeedIntMax(is_all_tensor ? 
Q_1d : Q, data->max_P_1d); 2650 2651 data->thread_1d = T_1d; 2652 CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, &data->module_assemble_qfunction, 1, "OP_T_1D", T_1d)); 2653 if (is_compile_good) { 2654 *is_good_build = true; 2655 CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module_assemble_qfunction, operator_name.c_str(), &data->assemble_qfunction)); 2656 } else { 2657 *is_good_build = false; 2658 data->use_assembly_fallback = true; 2659 } 2660 } 2661 CeedCallBackend(CeedDestroy(&ceed)); 2662 CeedCallBackend(CeedQFunctionDestroy(&qf)); 2663 return CEED_ERROR_SUCCESS; 2664 } 2665 2666 //------------------------------------------------------------------------------ 2667