// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#define CEED_DEBUG_COLOR 12

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda_runtime.h>

#include <iostream>
#include <sstream>
#include <string>

#include "../cuda-ref/ceed-cuda-ref.h"
#include "../cuda-shared/ceed-cuda-shared.h"
#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-gen.h"

// Record of a previously processed operator field whose shared-memory basis
// matrices (s_B_*/s_G_*) a later field may alias instead of loading its own copy.
struct FieldReuse_Cuda {
  CeedInt      index;      // field index to reuse; -1 means no reuse
  bool         is_input;   // true if the reused field is an input field
  CeedEvalMode eval_mode;  // eval mode the reused field's matrices were loaded for
};

//------------------------------------------------------------------------------
// Determine type of operator
//------------------------------------------------------------------------------
// Scans the bases of every input and output field to collect the kernel-wide
// parameters used when generating the fused operator kernel:
//   - max_P_1d: maximum number of (1D) basis nodes over all fields
//   - Q_1d:     shared number of (1D) quadrature points (must agree across fields)
//   - dim:      topological dimension (must agree across fields)
//   - is_tensor: true only if every field with a basis is tensor-product
//   - use_3d_slices: true when dim == 3 and every gradient field has a
//     collocated 1D gradient matrix, enabling the plane-by-plane strategy
// Returns CEED_ERROR_SUCCESS or an error via CeedCheck/CeedCallBackend.
static int CeedOperatorBuildKernelData_Cuda_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                                CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields,
                                                CeedQFunctionField *qf_output_fields, CeedInt *max_P_1d, CeedInt *Q_1d, CeedInt *dim, bool *is_tensor,
                                                bool *use_3d_slices) {
  // Find dim, P_1d, Q_1d
  *max_P_1d  = 0;
  *Q_1d      = 0;
  *dim       = 0;
  *is_tensor = true;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0;

      // Collect dim, P_1d, and Q_1d
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      // A single non-tensor basis forces the non-tensor code path for the whole operator
      *is_tensor = *is_tensor && is_field_tensor;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
      else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d));
      *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *dim = field_dim;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
      else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d));
      CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q_1d = field_Q_1d;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  // Same collection pass over the output fields
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool    is_field_tensor;
      CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0;

      // Collect dim, P_1d, and Q_1d
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_tensor = *is_tensor && is_field_tensor;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
      else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d));
      *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *dim = field_dim;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
      else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d));
      CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q_1d = field_Q_1d;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Only use 3D collocated gradient parallelization strategy when gradient is computed
  *use_3d_slices = false;
  if (*dim == 3) {
bool was_grad_found = false; 97 98 for (CeedInt i = 0; i < num_input_fields; i++) { 99 CeedEvalMode eval_mode; 100 101 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 102 if (eval_mode == CEED_EVAL_GRAD) { 103 CeedBasis_Cuda_shared *basis_data; 104 CeedBasis basis; 105 106 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 107 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 108 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true); 109 was_grad_found = true; 110 CeedCallBackend(CeedBasisDestroy(&basis)); 111 } 112 } 113 for (CeedInt i = 0; i < num_output_fields; i++) { 114 CeedEvalMode eval_mode; 115 116 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 117 if (eval_mode == CEED_EVAL_GRAD) { 118 CeedBasis_Cuda_shared *basis_data; 119 CeedBasis basis; 120 121 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 122 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 123 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true); 124 was_grad_found = true; 125 CeedCallBackend(CeedBasisDestroy(&basis)); 126 } 127 } 128 } 129 return CEED_ERROR_SUCCESS; 130 } 131 132 //------------------------------------------------------------------------------ 133 // Setup fields 134 //------------------------------------------------------------------------------ 135 static int CeedOperatorBuildKernelFieldData_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, CeedInt i, CeedOperatorField op_field, 136 CeedQFunctionField qf_field, FieldReuse_Cuda field_reuse, CeedInt Q_1d, bool is_input, 137 bool is_tensor, bool is_at_points, bool use_3d_slices) { 138 const char *field_name; 139 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 140 std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 141 std::string option_name = (is_input ? 
"inputs" : "outputs");
  CeedEvalMode           eval_mode = CEED_EVAL_NONE;
  CeedInt                elem_size = 0, num_comp = 0, P_1d = 0;
  CeedElemRestriction    elem_rstr;
  CeedBasis_Cuda_shared *basis_data;
  CeedBasis              basis;

  // Field reuse info
  // index == -1 means this field must load its own shared-memory matrices
  bool use_previous_field = field_reuse.index != -1;

  CeedCallBackend(CeedOperatorFieldGetName(op_field, &field_name));
  code << " // -- " << (is_input ? "Input" : "Output") << " field " << i << ": " << field_name << "\n";

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  if (basis != CEED_BASIS_NONE) {
    CeedCallBackend(CeedBasisGetData(basis, &basis_data));
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Set field constants
  if (eval_mode != CEED_EVAL_WEIGHT) {
    // With no basis the field data lives at quadrature points, so P == Q for this field
    code << " const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n";
    code << " const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n";
  }

  // Load basis data
  code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
  switch (eval_mode) {
    case CEED_EVAL_NONE:
      break;  // No basis matrices needed
    case CEED_EVAL_INTERP:
      if (is_at_points) {
        // AtPoints
        // Lazily build and cache the device copy of the Chebyshev interpolation matrix
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallCuda(CeedBasisReturnCeed(basis),
                       cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      if (use_previous_field) {
        // Alias the interpolation matrix already loaded for an earlier field
        std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

        code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
      } else {
        code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
        code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
      }
      break;
    case CEED_EVAL_GRAD:
      if (is_at_points) {
        // AtPoints
        // Same lazy Chebyshev matrix setup as for CEED_EVAL_INTERP above
        if (!basis_data->d_chebyshev_interp_1d) {
          CeedSize    interp_bytes;
          CeedScalar *chebyshev_interp_1d;

          interp_bytes = P_1d * Q_1d * sizeof(CeedScalar);
          CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d));
          CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d));
          CeedCallCuda(CeedBasisReturnCeed(basis), cudaMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes));
          CeedCallCuda(CeedBasisReturnCeed(basis),
                       cudaMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, cudaMemcpyHostToDevice));
          CeedCallBackend(CeedFree(&chebyshev_interp_1d));
        }
        if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d;
        else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d;
      } else {
        // Standard quadrature
        if (is_input) data->B.inputs[i] = basis_data->d_interp_1d;
        else data->B.outputs[i] = basis_data->d_interp_1d;
      }
      if (is_tensor) {
        if (use_previous_field) {
          std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n";
          code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n";
        }
      }
      if (is_at_points) break;  // No G mat for AtPoints
      if (use_3d_slices) {
        // Collocated gradient: G is Q x Q
        if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d;
        else data->G.outputs[i] = basis_data->d_collo_grad_1d;
        // G can only be reused from a field that also computed a gradient
        if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
          std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

          code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
        } else {
          code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
          code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
        }
      } else {
        // Prefer the collocated gradient matrix when the basis provides one
        bool has_collo_grad = basis_data->d_collo_grad_1d;

        if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
        if (has_collo_grad) {
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n";
            code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n";
          }
        } else {
          if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) {
            std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index));

            code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n";
          } else {
            // Non-collocated: G is P x Q, with an extra dim factor for non-tensor bases
            code << " __shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim") << "];\n";
            code << " LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << ">(data, G." << option_name << "[" << i << "], s_G"
                 << var_suffix << ");\n";
          }
        }
      }
      break;
    case CEED_EVAL_WEIGHT:
      break;  // No action
    // LCOV_EXCL_START
    case CEED_EVAL_DIV:
    case CEED_EVAL_CURL:
      break;  // TODO: Not implemented
      // LCOV_EXCL_STOP
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Restriction
//------------------------------------------------------------------------------
// Emits the generated-kernel code that moves field i between its L-vector and
// per-element registers (r_e_*): reads for inputs, writes for outputs.
// Handles standard (indexed), strided, and AtPoints restrictions, records the
// device offset arrays in data->indices, and reuses a previously restricted
// input buffer when field_input_buffer[i] points at an earlier field.
static int CeedOperatorBuildKernelRestriction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, CeedInt i, CeedInt dim,
                                                       CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field,
                                                       CeedInt Q_1d, bool is_input, bool is_tensor, bool is_at_points, bool use_3d_slices) {
  std::string               var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string               P_name     = (is_tensor ? "P_1d" : "P") + var_suffix;
  CeedEvalMode              eval_mode  = CEED_EVAL_NONE;
  CeedInt                   elem_size = 0, num_comp = 0, P_1d = 0;
  CeedSize                  l_size;
  CeedRestrictionType       rstr_type = CEED_RESTRICTION_STANDARD;
  CeedElemRestriction_Cuda *rstr_data;
  CeedElemRestriction       elem_rstr;
  CeedBasis                 basis;

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
  }
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  if (basis != CEED_BASIS_NONE) {
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, 
&P_1d));
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Restriction
  if (is_input) {
    // Input
    if (field_input_buffer[i] != i) {
      std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]);

      // Restriction was already done for previous input
      code << " CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n";
    } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) {
      if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) {
        // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated
        code << " CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n";
      } else if (rstr_type != CEED_RESTRICTION_POINTS) {
        // Otherwise we're using the scratch space
        code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
      }
      switch (rstr_type) {
        case CEED_RESTRICTION_STANDARD: {
          CeedInt comp_stride;

          CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
          code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << " // CompStride: " << comp_stride << "\n";
          // Record the device offsets array for the launch-time kernel arguments
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          code << " ReadLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name
               << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix << ");\n";
          break;
        }
        case CEED_RESTRICTION_STRIDED: {
          bool    has_backend_strides;
          CeedInt num_elem;

          CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
          CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
          // Default backend strides; overwritten below when user strides are set
          CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

          if (!has_backend_strides) {
            CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
          }
          code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
          code << " ReadLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", "
               << strides[1] << ", " << strides[2] << ">(data, elem, d" << var_suffix << ", r_e" << var_suffix << ");\n";
          break;
        }
        case CEED_RESTRICTION_POINTS: {
          CeedInt comp_stride;

          // AtPoints: points are read later in the point loop; only emit the stride constant
          CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
          code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n";
          data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets;
          break;
        }
        // LCOV_EXCL_START
        case CEED_RESTRICTION_ORIENTED:
        case CEED_RESTRICTION_CURL_ORIENTED:
          break;  // TODO: Not implemented
          // LCOV_EXCL_STOP
      }
    }
  } else {
    // Output
    switch (rstr_type) {
      case CEED_RESTRICTION_STANDARD: {
        CeedInt comp_stride;

        CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
        code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n";
        CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
        code << " // CompStride: " << comp_stride << "\n";
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        code << " WriteLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name
             << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix << ");\n";
        break;
      }
      case CEED_RESTRICTION_STRIDED: {
        bool    has_backend_strides;
        CeedInt num_elem;

        CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
        CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
        CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

        if (!has_backend_strides) {
          CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
        }
        code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
        code << " WriteLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", "
             << strides[1] << ", " << strides[2] << ">(data, elem, r_e" << var_suffix << ", d" << var_suffix << ");\n";
        break;
      }
      case CEED_RESTRICTION_POINTS:
        // AtPoints outputs are written in the point loop; just record the offsets
        data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets;
        break;
      // LCOV_EXCL_START
      case CEED_RESTRICTION_ORIENTED:
      case CEED_RESTRICTION_CURL_ORIENTED:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Basis
//------------------------------------------------------------------------------
// Emits the generated-kernel code applying the basis action for field i:
// for inputs, transforms element data r_e_* into quadrature data r_q_* (or the
// AtPoints Chebyshev coefficients r_c_*); for outputs, applies the transpose
// action back into r_e_*.  Chooses tensor/non-tensor, collocated, 3D-slice,
// and AtPoints variants of the device helpers accordingly.
static int CeedOperatorBuildKernelBasis_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, CeedInt i, CeedInt dim,
                                                 CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt Q_1d, bool is_input, bool is_tensor,
                                                 bool is_at_points, bool use_3d_slices) {
  std::string var_suffix = (is_input ? 
"_in_" : "_out_") + std::to_string(i);
  std::string         P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
  CeedEvalMode        eval_mode = CEED_EVAL_NONE;
  CeedInt             elem_size = 0, num_comp = 0, P_1d = 0;
  CeedElemRestriction elem_rstr;
  CeedBasis           basis;

  // Get field data
  CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr));
  if (elem_rstr != CEED_ELEMRESTRICTION_NONE) {
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
  }
  CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis));
  if (basis != CEED_BASIS_NONE) {
    if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
    else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d));
  }
  CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode));

  // Basis
  code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
  if (is_input) {
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        // No basis action: quadrature data aliases the element data
        if (!use_3d_slices && !is_at_points) {
          code << " CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n";
        }
        break;
      case CEED_EVAL_INTERP:
        if (is_at_points) {
          // Interpolate to Chebyshev coefficients r_c_*, evaluated per point later
          std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";

          code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
        } else {
          std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d") : "InterpNonTensor";

          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_GRAD:
        if (is_at_points) {
          // AtPoints: only interpolate to coefficients; gradient is taken per point
          std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d";

          code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n";
        } else if (use_3d_slices) {
          // 3D slices: interpolate only; collocated gradient applied in the slice loop
          std::string function_name = (dim > 1 ? "InterpTensor" : "Interp") + std::to_string(dim) + "d";

          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n";
        } else if (is_tensor) {
          bool        is_collocated = dim == 3 && Q_1d >= P_1d;
          std::string function_name = (dim == 1 ? "Grad" : (is_collocated ? "GradTensorCollocated" : "GradTensor")) + std::to_string(dim) + "d";

          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix
               << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
        } else {
          std::string function_name = "GradNonTensor";

          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim];\n";
          code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e"
               << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_WEIGHT: {
        if (is_at_points) {
          code << " // Nothing to do AtPoints\n";
        } else {
          CeedBasis_Cuda_shared *basis_data;
          std::string            function_name = is_tensor ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d") : "WeightNonTensor";

          code << " CeedScalar r_q" << var_suffix << "[" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
          CeedCallBackend(CeedBasisGetData(basis, &basis_data));
          // Quadrature weights are shared operator-wide via data->W
          data->W = basis_data->d_q_weight_1d;
          code << " " << function_name << "<" << Q_name << ">(data, W, r_q" << var_suffix << ");\n";
        }
        break;
      }
      // LCOV_EXCL_START
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  } else {
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        code << " CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n";
        break;  // No action
      case CEED_EVAL_INTERP:
        code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
        if (is_at_points) {
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";

          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else {
          std::string function_name =
              is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d") : "InterpTransposeNonTensor";

          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        }
        break;
      case CEED_EVAL_GRAD:
        code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n";
        if (is_at_points) {
          // AtPoints: transpose interpolation of the accumulated coefficients
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";

          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else if (use_3d_slices) {
          // 3D slices: collocated transpose gradient was applied in the slice loop
          std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d";

          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
               << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else if (is_tensor) {
          bool        is_collocated = dim == 3 && Q_1d >= P_1d;
          std::string function_name =
              (dim == 1 ? "GradTranspose" : (is_collocated ? "GradTransposeTensorCollocated" : "GradTransposeTensor")) + std::to_string(dim) + "d";

          code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix
               << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
        } else {
          std::string function_name = "GradTransposeNonTensor";

          code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q"
               << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT:
        break;  // Should not occur
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }
  CeedCallBackend(CeedBasisDestroy(&basis));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// QFunction
//------------------------------------------------------------------------------
// Emits the QFunction call site of the generated kernel: declares the output
// quadrature/accumulator arrays, then generates the point-batch loop
// (AtPoints), the 3D slice loop (use_3d_slices), or the plain quadrature-point
// path, marshalling field data in and out of the user QFunction.
static int CeedOperatorBuildKernelQFunction_Cuda_gen(std::ostringstream &code, CeedOperator_Cuda_gen *data, CeedInt dim, CeedInt max_num_points,
                                                     CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                                     CeedQFunctionField *qf_input_fields, CeedInt num_output_fields,
                                                     CeedOperatorField *op_output_fields, CeedQFunctionField *qf_output_fields,
                                                     std::string qfunction_name, CeedInt Q_1d, bool is_tensor, bool is_at_points,
                                                     bool use_3d_slices) {
  std::string Q_name = is_tensor ? 
"Q_1d" : "Q";
  CeedEvalMode        eval_mode = CEED_EVAL_NONE;
  CeedElemRestriction elem_rstr;

  // Setup output arrays
  code << "\n // -- Output field setup\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;
    std::string var_suffix = "_out_" + std::to_string(i);

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << " // ---- Output field " << i << ": " << field_name << "\n";
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        if (is_at_points) {
          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n";
        } else {
          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
        }
        break;
      case CEED_EVAL_INTERP:
        if (is_at_points) {
          // Accumulator for point data
          code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n";
          code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n";
          code << " r_c" << var_suffix << "[i] = 0.0;\n";
          code << " }\n";
        } else {
          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
        }
        break;
      case CEED_EVAL_GRAD:
        if (is_at_points) {
          // Accumulator for point data
          // NOTE(review): r_c is declared num_comp*Q*dim entries but this loop
          // zeroes only num_comp*Q of them — confirm the remaining "*dim"
          // portion is initialized before GradTransposeAtPoints accumulates into it
          code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "*dim];\n";
          code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n";
          code << " r_c" << var_suffix << "[i] = 0.0;\n";
          code << " }\n";
        } else if (use_3d_slices) {
          // Accumulator for gradient slices
          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n";
          code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) {\n";
          code << " r_q" << var_suffix << "[i] = 0.0;\n";
          code << " }\n";
        } else {
          code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n";
        }
        break;
      case CEED_EVAL_WEIGHT:
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }

  if (is_at_points) {
    // We need to handle batches of points
    // point_loop_bound rounds max_num_points up to a multiple of the thread block size
    code << "\n // Note: Using batches of points\n";
    code << " const CeedInt point_loop_bound = (blockDim.x * blockDim.y) * ceil(1.0 * max_num_points / (blockDim.x * blockDim.y));\n\n";
    code << " #pragma unroll\n";
    code << " for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < point_loop_bound; i += blockDim.x * blockDim.y) {\n";
    code << " const CeedInt p = i % max_num_points;\n\n";

    code << " // -- Coordinates\n";
    code << " CeedScalar r_x[dim];\n";
    code << " ReadPoint<dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n";

    code << " // -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << " // ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action
      code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // Read the point values directly from the L-vector
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << " ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix
               << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_INTERP:
          // Evaluate the Chebyshev coefficients r_c_* at this point's coordinates
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          code << " InterpAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name << ">(data, i, r_c"
               << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_GRAD:
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n";
          code << " GradAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name << ">(data, i, r_c"
               << var_suffix << ", r_x, r_s" << var_suffix << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          // AtPoints weights are identically 1.0
          code << " CeedScalar r_s" << var_suffix << "[1];\n";
          code << " r_s" << var_suffix << "[0] = 1.0;\n";
          break;
        // LCOV_EXCL_START
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
          // LCOV_EXCL_STOP
      }
    }
    code << "\n // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_out_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
      code << " // ---- Output field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_INTERP:
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n";
          break;
        // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
          // LCOV_EXCL_STOP
      }
    }

  } else if (use_3d_slices) {
    // We treat quadrature points per slice in 3d to save registers
    code << "\n // Note: Using planes of 3D elements\n";
    code << " #pragma unroll\n";
    code << " for (CeedInt q = 0; q < " << Q_name << "; q++) {\n";
    code << " // -- Input fields\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      const char *field_name;
      std::string var_suffix = "_in_" + std::to_string(i);

      CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name));
      code << " // ---- Input field " << i << ": " << field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action
      code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          bool is_strided;

          code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n";

          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
          if (is_strided) {
            bool    has_backend_strides;
            CeedInt num_elem, elem_size;

            CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
            CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
            CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
            // Default backend strides; overwritten below when user strides are set
            CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

            if (!has_backend_strides) {
              CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides));
            }
            // NOTE(review): SOURCE is truncated at this point; the remainder of
            // this function is not visible in this chunk
            code
<< " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 772 code << " ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", " << strides[0] << ", " << strides[1] << ", " 773 << strides[2] << ">(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n"; 774 } else { 775 CeedSize l_size = 0; 776 CeedInt comp_stride; 777 CeedElemRestriction_Cuda *rstr_data; 778 779 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 780 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 781 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 782 code << " // CompStride: " << comp_stride << "\n"; 783 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 784 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 785 code << " ReadEVecSliceStandard3d<num_comp" << var_suffix << ", " << comp_stride << ", " << Q_name << ">(data, l_size" << var_suffix 786 << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 787 } 788 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 789 break; 790 case CEED_EVAL_INTERP: 791 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 792 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n"; 793 code << " r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n"; 794 code << " }\n"; 795 break; 796 case CEED_EVAL_GRAD: 797 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 798 code << " GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_q" << var_suffix << ", s_G" 799 << var_suffix << ", r_s" << var_suffix << ");\n"; 800 break; 801 case CEED_EVAL_WEIGHT: 802 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 803 code << " r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n"; 804 break; 805 // LCOV_EXCL_START 806 case CEED_EVAL_DIV: 
807 case CEED_EVAL_CURL: 808 break; // TODO: Not implemented 809 // LCOV_EXCL_STOP 810 } 811 } 812 code << "\n // -- Output fields\n"; 813 for (CeedInt i = 0; i < num_output_fields; i++) { 814 const char *field_name; 815 std::string var_suffix = "_out_" + std::to_string(i); 816 817 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 818 code << " // ---- Output field " << i << ": " << field_name << "\n"; 819 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 820 // Basis action 821 switch (eval_mode) { 822 case CEED_EVAL_NONE: 823 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 824 break; 825 case CEED_EVAL_INTERP: 826 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 827 break; 828 case CEED_EVAL_GRAD: 829 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 830 break; 831 // LCOV_EXCL_START 832 case CEED_EVAL_WEIGHT: 833 break; // Should not occur 834 case CEED_EVAL_DIV: 835 case CEED_EVAL_CURL: 836 break; // TODO: Not implemented 837 // LCOV_EXCL_STOP 838 } 839 } 840 } else { 841 code << "\n // Note: Using full elements\n"; 842 code << " {\n"; 843 code << " // -- Input fields\n"; 844 for (CeedInt i = 0; i < num_input_fields; i++) { 845 const char *field_name; 846 847 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 848 code << " // ---- Input field " << i << ": " << field_name << "\n"; 849 code << " CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n"; 850 } 851 code << " // -- Output fields\n"; 852 for (CeedInt i = 0; i < num_output_fields; i++) { 853 const char *field_name; 854 855 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 856 code << " // ---- Output field " << i << ": " << field_name << "\n"; 857 code << " CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n"; 858 } 859 } 860 861 // Input and output buffers 862 code << "\n // -- QFunction inputs 
and outputs\n"; 863 code << " // ---- Inputs\n"; 864 code << " CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n"; 865 for (CeedInt i = 0; i < num_input_fields; i++) { 866 const char *field_name; 867 868 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 869 code << " // ------ Input field " << i << ": " << field_name << "\n"; 870 code << " inputs[" << i << "] = r_s_in_" << i << ";\n"; 871 } 872 code << " // ---- Outputs\n"; 873 code << " CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n"; 874 for (CeedInt i = 0; i < num_output_fields; i++) { 875 const char *field_name; 876 877 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 878 code << " // ------ Output field " << i << ": " << field_name << "\n"; 879 code << " outputs[" << i << "] = r_s_out_" << i << ";\n"; 880 } 881 882 // Apply QFunction 883 code << "\n // -- Apply QFunction\n"; 884 code << " " << qfunction_name << "(ctx, "; 885 if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) { 886 code << "1"; 887 } else { 888 code << Q_name; 889 } 890 code << ", inputs, outputs);\n"; 891 892 if (is_at_points) { 893 // Map back to coefficients 894 code << "\n // -- Output fields\n"; 895 for (CeedInt i = 0; i < num_output_fields; i++) { 896 const char *field_name; 897 std::string var_suffix = "_out_" + std::to_string(i); 898 std::string P_name = "P_1d" + var_suffix; 899 900 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 901 code << " // ---- Output field " << i << ": " << field_name << "\n"; 902 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 903 // Basis action 904 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 905 switch (eval_mode) { 906 case CEED_EVAL_NONE: { 907 CeedInt comp_stride; 908 CeedElemRestriction elem_rstr; 909 910 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 911 
CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 912 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 913 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 914 code << " WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 915 << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]" 916 << ", r_s" << var_suffix << ", d" << var_suffix << ");\n"; 917 break; 918 } 919 case CEED_EVAL_INTERP: 920 code << " if (i >= points.num_per_elem[elem]) {\n"; 921 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 922 code << " }\n"; 923 code << " InterpTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 924 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 925 break; 926 case CEED_EVAL_GRAD: 927 code << " if (i >= points.num_per_elem[elem]) {\n"; 928 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 929 code << " }\n"; 930 code << " GradTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 931 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 932 break; 933 // LCOV_EXCL_START 934 case CEED_EVAL_WEIGHT: 935 break; // Should not occur 936 case CEED_EVAL_DIV: 937 case CEED_EVAL_CURL: 938 break; // TODO: Not implemented 939 // LCOV_EXCL_STOP 940 } 941 } 942 } else if (use_3d_slices) { 943 // Copy or apply transpose grad, if needed 944 code << "\n // -- Output fields\n"; 945 for (CeedInt i = 0; i < num_output_fields; i++) { 946 const char *field_name; 947 std::string var_suffix = "_out_" + std::to_string(i); 948 std::string P_name = "P_1d" + var_suffix; 949 950 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 951 code << " // ---- Output field " << i << ": " << 
field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // Copy this slice's QFunction output back into the full per-element register r_q
          code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << " }\n";
          break;
        case CEED_EVAL_INTERP:
          // Same copy-back as CEED_EVAL_NONE; interpolation transpose happens later, outside the slice loop
          code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << " }\n";
          break;
        case CEED_EVAL_GRAD:
          // Apply the transpose collocated-gradient action for this slice and accumulate into r_q
          code << " GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_s" << var_suffix << ", s_G"
               << var_suffix << ", r_q" << var_suffix << ");\n";
          break;
        // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
      }
    }
  }
  // Close the generated point/quadrature scope opened above
  code << " }\n";
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Build single operator kernel
//
// Generates the fused CUDA kernel source for this operator, JIT compiles it,
// and caches the resulting kernel handle in the operator data.
//
// On success, *is_good_build is set to true and data->op holds the compiled
// kernel. When the operator cannot be fused here — not all bases come from
// /gpu/cuda/shared, tensor and non-tensor bases are mixed, or the JIT compile
// fails — *is_good_build is set to false so the caller can fall back to the
// reference backend. Returns CEED_ERROR_SUCCESS unless a backend call fails.
//------------------------------------------------------------------------------
extern "C" int CeedOperatorBuildKernel_Cuda_gen(CeedOperator op, bool *is_good_build) {
  bool is_tensor = true, is_at_points = false, use_3d_slices = false;
  Ceed ceed;
  CeedInt Q_1d, num_input_fields, num_output_fields, dim = 1, max_num_points = 0, coords_comp_stride = 0;
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction_Cuda_gen *qf_data;
  CeedQFunction qf;
  CeedOperatorField *op_input_fields, *op_output_fields;
  CeedOperator_Cuda_gen *data;
  std::ostringstream code;

  CeedCallBackend(CeedOperatorGetData(op, &data));
  {
    bool is_setup_done;

    // Nothing to do if the kernel was already built (or a previous build already failed)
    CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
    if (is_setup_done) {
      *is_good_build = !data->use_fallback;
      return CEED_ERROR_SUCCESS;
    }
  }

  // Check field compatibility
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  {
    bool has_shared_bases = true, is_all_tensor = true, is_all_nontensor = true;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        // NOTE: this local is_tensor intentionally shadows the outer variable; only the per-field value is needed here
        bool is_tensor = true;
        const char *resource;
        char *resource_root;
        Ceed basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;
        // Only bases implemented by the /gpu/cuda/shared backend can be fused into this kernel
        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }

    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        // Shadowing as above; per-field value only
        bool is_tensor = true;
        const char *resource;
        char *resource_root;
        Ceed basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;

        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/cuda/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }
    // -- Fallback to ref if not all bases are shared
    if (!has_shared_bases || (!is_all_tensor && !is_all_nontensor)) {
      *is_good_build = false;
      return CEED_ERROR_SUCCESS;
    }
  }
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Get operator data
  CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
  CeedCallBackend(CeedOperatorBuildKernelData_Cuda_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, op_output_fields,
                                                       qf_output_fields, &data->max_P_1d, &Q_1d, &dim, &is_tensor, &use_3d_slices));
  if (dim == 0) dim = 1;
  data->dim = dim;
  if (is_at_points) {
    CeedElemRestriction_Cuda *rstr_data;
    CeedElemRestriction rstr_points = NULL;

    // Cache point counts, coordinate stride, and device offsets for the points restriction
    CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
    CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
    CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data));
    data->points.indices = (CeedInt *)rstr_data->d_offsets;
    CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
  }
  // The AtPoints code path does its own point batching; the 3D-slice strategy does not apply there
  if (is_at_points) use_3d_slices = false;
  if (Q_1d == 0) {
    // No basis fixed Q_1d; fall back to the point count (AtPoints) or the operator's quadrature size
    if (is_at_points) Q_1d = max_num_points;
    else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d));
  }
  data->Q_1d = Q_1d;

  // Check for restriction only identity operator
  {
    bool is_identity_qf;

    CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
    if (is_identity_qf) {
      CeedEvalMode eval_mode_in, eval_mode_out;

      // Identity QFunction with NONE -> NONE eval modes would be pure restriction; not supported by this backend
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
      CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
                "Backend does not implement restriction only identity operators");
    }
  }

  // Add atomicAdd function for old NVidia architectures
  {
    Ceed_Cuda *ceed_data;
    struct cudaDeviceProp prop;

    // Pre-SM 6.0 devices need a software atomicAdd fallback for non-FP32 CeedScalar
    CeedCallBackend(CeedGetData(ceed, &ceed_data));
    CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
    if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
      code << "// AtomicAdd fallback source\n";
      code << "#include <ceed/jit-source/cuda/cuda-atomic-add-fallback.h>\n\n";
    }
  }

  // Load basis source files
  if (is_tensor) {
    code << "// Tensor basis source\n";
    code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h>\n\n";
  } else {
    code << "// Non-tensor basis source\n";
    code << "#include <ceed/jit-source/cuda/cuda-shared-basis-nontensor-templates.h>\n\n";
  }
  if (is_at_points) {
    code << "// AtPoints basis source\n";
    code << "#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points-templates.h>\n\n";
  }
  code << "// CodeGen operator source\n";
  code << "#include <ceed/jit-source/cuda/cuda-gen-templates.h>\n\n";

  // Get QFunction name
  std::string qfunction_name(qf_data->qfunction_name);
  std::string operator_name;

  operator_name = "CeedKernelCudaGenOperator_" + qfunction_name;

  // Define CEED_Q_VLA
  // The QFunction is called with Q_1d points at a time only for tensor 3D full-element processing;
  // all other paths (1D/2D, AtPoints, 3D slices, non-tensor) call it one point at a time
  code << "\n#undef CEED_Q_VLA\n";
  if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) {
    code << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  // Add user QFunction source
  {
    const char *source_path;

    CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
    CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/cuda/gen backend requires QFunction source code file");

    code << "// User QFunction source\n";
    code << "#include \"" << source_path << "\"\n\n";
  }

  // Setup
  code << "\n// -----------------------------------------------------------------------------\n";
  code << "// Operator Kernel\n";
  code << "// \n";
  code << "// d_[in,out]_i: CeedVector device array\n";
  code << "// r_[in,out]_e_i: Element vector register\n";
  code << "// r_[in,out]_q_i: Quadrature space vector register\n";
  code << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
  code << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
  code << "// \n";
  code << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
  code << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
  code << "// -----------------------------------------------------------------------------\n";
  code << "extern \"C\" __global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar *W, Points_Cuda "
          "points) {\n";

  // Scratch buffers
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << " const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << " CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
  }

  code << " const CeedInt dim = " << dim << ";\n";
  code << " const CeedInt " << (is_tensor ? "Q_1d" : "Q") << " = " << Q_1d << ";\n";
  if (is_at_points) {
    code << " const CeedInt max_num_points = " << max_num_points << ";\n";
    code << " const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
  }

  // Shared data
  // Each thread-block z-layer gets its own slice of the dynamic shared-memory buffer
  code << " extern __shared__ CeedScalar slice[];\n";
  code << " SharedData_Cuda data;\n";
  code << " data.t_id_x = threadIdx.x;\n";
  code << " data.t_id_y = threadIdx.y;\n";
  code << " data.t_id_z = threadIdx.z;\n";
  code << " data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
  code << " data.slice = slice + data.t_id_z*OP_T_1D" << ((!is_tensor || dim == 1) ? "" : "*OP_T_1D") << ";\n";

  // -- Determine input mat reuse
  // For each input field, record the earliest prior input field sharing the same basis so the
  // generated kernel can reuse its shared-memory B/G matrices instead of loading duplicates
  FieldReuse_Cuda input_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    input_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
    if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
    for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          input_matrix_reuse[i].index = j;
          input_matrix_reuse[i].is_input = true;
          input_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            input_matrix_reuse[i].index = j;
            input_matrix_reuse[i].is_input = true;
            input_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // -- Determine output mat reuse
  // Output fields may reuse matrices from any input field, or from an earlier output field
  FieldReuse_Cuda output_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_output_fields; i++) {
    output_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index = j;
          output_matrix_reuse[i].is_input = true;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index = j;
            output_matrix_reuse[i].is_input = true;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index = j;
          output_matrix_reuse[i].is_input = false;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index = j;
            output_matrix_reuse[i].is_input = false;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // Initialize constants, and matrices B and G
  code << "\n // Input field constants and basis data\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], Q_1d,
                                                              true, is_tensor, is_at_points, use_3d_slices));
  }
  code << "\n // Output field constants and basis data\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Cuda_gen(code, data, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], Q_1d,
                                                              false, is_tensor, is_at_points, use_3d_slices));
  }

  // Loop over all elements
  // Generated grid-stride style loop over elements: each z-layer of the block handles one element
  code << "\n // Element loop\n";
  code << " __syncthreads();\n";
  code << " for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";

  // -- Compute minimum buffer space needed
  // Size the single scratch E-vector buffer to the largest field footprint
  CeedInt max_rstr_buffer_size = 1;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedInt num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedInt num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  code << " // Scratch restriction buffer space\n";
  code << " CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";

  // -- Determine best input field processing order
  // Group input fields that share both a vector and a restriction so the E-vector read is done
  // once and reused by every field in the group
  CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    field_rstr_in_buffer[i] = -1;
    input_field_order[i] = -1;
  }
  {
    bool is_ordered[CEED_FIELD_MAX];
    CeedInt curr_index = 0;

    for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedVector vec_i;
      CeedElemRestriction rstr_i;

      if (is_ordered[i]) continue;
      field_rstr_in_buffer[i] = i;
      is_ordered[i] = true;
      input_field_order[curr_index] = i;
      curr_index++;
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
      if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
      for (CeedInt j = i + 1; j < num_input_fields; j++) {
        CeedVector vec_j;
        CeedElemRestriction rstr_j;

        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
        CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
        if (rstr_i == rstr_j && vec_i == vec_j) {
          field_rstr_in_buffer[j] = i;
          is_ordered[j] = true;
          input_field_order[curr_index] = j;
          curr_index++;
        }
        CeedCallBackend(CeedVectorDestroy(&vec_j));
        CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
      }
      CeedCallBackend(CeedVectorDestroy(&vec_i));
      CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
    }
  }

  // -- Input restriction and basis
  code << "\n // -- Input field restrictions and basis actions\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char *field_name;
    const CeedInt f = input_field_order[i];

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
    code << " // ---- Input field " << f << ": " << field_name << "\n";

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, f, dim, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f],
                                                                Q_1d, true, is_tensor, is_at_points, use_3d_slices));

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, f, dim, op_input_fields[f], qf_input_fields[f], Q_1d, true, is_tensor,
                                                          is_at_points, use_3d_slices));
  }

  // -- Q function
  CeedCallBackend(CeedOperatorBuildKernelQFunction_Cuda_gen(code, data, dim, max_num_points, num_input_fields, op_input_fields, qf_input_fields,
                                                            num_output_fields, op_output_fields, qf_output_fields, qfunction_name, Q_1d, is_tensor,
                                                            is_at_points, use_3d_slices));

  // -- Output basis and restriction
  code << "\n // -- Output field basis action and restrictions\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << " // ---- Output field " << i << ": " << field_name << "\n";

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Cuda_gen(code, data, i, dim, op_output_fields[i], qf_output_fields[i], Q_1d, false, is_tensor,
                                                          is_at_points, use_3d_slices));

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Cuda_gen(code, data, i, dim, NULL, op_output_fields[i], qf_output_fields[i], Q_1d, false,
                                                                is_tensor, is_at_points, use_3d_slices));
  }

  // Close loop and function
  code << " }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  // Compile
  {
    bool is_compile_good = false;

    // OP_T_1D is the 1D thread extent used by the JIT templates; it must cover both P and Q
    CeedCallBackend(CeedTryCompile_Cuda(ceed, code.str().c_str(), &is_compile_good, &data->module, 1, "OP_T_1D", CeedIntMax(Q_1d, data->max_P_1d)));
    if (is_compile_good) {
      *is_good_build = true;
      CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, operator_name.c_str(), &data->op));
    } else {
      // A failed JIT build marks the operator for fallback to the reference backend
      *is_good_build = false;
      data->use_fallback = true;
    }
  }
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  CeedCallBackend(CeedDestroy(&ceed));
  CeedCallBackend(CeedQFunctionDestroy(&qf));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------