1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors. 2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. 3 // 4 // SPDX-License-Identifier: BSD-2-Clause 5 // 6 // This file is part of CEED: http://github.com/ceed 7 8 #define CEED_DEBUG_COLOR 12 9 10 #include <ceed.h> 11 #include <ceed/backend.h> 12 #include <ceed/jit-tools.h> 13 14 #include <iostream> 15 #include <sstream> 16 #include <string> 17 18 #include "../hip-ref/ceed-hip-ref.h" 19 #include "../hip-shared/ceed-hip-shared.h" 20 #include "../hip/ceed-hip-common.h" 21 #include "../hip/ceed-hip-compile.h" 22 #include "ceed-hip-gen.h" 23 24 //------------------------------------------------------------------------------ 25 // Calculate the block size used for launching the operator kernel 26 //------------------------------------------------------------------------------ 27 extern "C" int BlockGridCalculate_Hip_gen(const CeedInt dim, const CeedInt num_elem, const CeedInt P_1d, const CeedInt Q_1d, CeedInt *block_sizes) { 28 const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d); 29 if (dim == 1) { 30 CeedInt elems_per_block = 64 * thread_1d > 256 ? 256 / thread_1d : 64; 31 32 elems_per_block = elems_per_block > 0 ? elems_per_block : 1; 33 block_sizes[0] = thread_1d; 34 block_sizes[1] = 1; 35 block_sizes[2] = elems_per_block; 36 } else if (dim == 2) { 37 const CeedInt elems_per_block = thread_1d < 4 ? 16 : 2; 38 39 block_sizes[0] = thread_1d; 40 block_sizes[1] = thread_1d; 41 block_sizes[2] = elems_per_block; 42 } else if (dim == 3) { 43 const CeedInt elems_per_block = thread_1d < 6 ? 4 : (thread_1d < 8 ? 2 : 1); 44 45 block_sizes[0] = thread_1d; 46 block_sizes[1] = thread_1d; 47 block_sizes[2] = elems_per_block; 48 } 49 return CEED_ERROR_SUCCESS; 50 } 51 52 //------------------------------------------------------------------------------ 53 // Determine type of operator 54 //------------------------------------------------------------------------------ 55 static int CeedOperatorBuildKernelData_Hip_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields, 56 CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields, 57 CeedQFunctionField *qf_output_fields, CeedInt *max_P_1d, CeedInt *Q_1d, CeedInt *dim, bool *is_tensor, 58 bool *use_3d_slices) { 59 // Find dim, P_1d, Q_1d 60 *max_P_1d = 0; 61 *Q_1d = 0; 62 *dim = 0; 63 *is_tensor = true; 64 65 for (CeedInt i = 0; i < num_input_fields; i++) { 66 CeedBasis basis; 67 68 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 69 if (basis != CEED_BASIS_NONE) { 70 bool is_field_tensor; 71 CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0; 72 73 // Collect dim, P_1d, and Q_1d 74 CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor)); 75 *is_tensor = *is_tensor && is_field_tensor; 76 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d)); 77 else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d)); 78 *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d); 79 CeedCallBackend(CeedBasisGetDimension(basis, &field_dim)); 80 CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 81 *dim = field_dim; 82 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d)); 83 else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d)); 84 CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 85 *Q_1d = field_Q_1d; 86 } 87 CeedCallBackend(CeedBasisDestroy(&basis)); 88 } 89 for (CeedInt i = 0; i < num_output_fields; i++) { 90 CeedBasis basis; 91 92 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 93 if (basis != CEED_BASIS_NONE) { 94 bool is_field_tensor; 95 CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0; 96 97 // Collect dim, P_1d, and Q_1d 98 CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor)); 99 *is_tensor = *is_tensor && is_field_tensor; 100 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d)); 101 else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d)); 102 *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d); 103 CeedCallBackend(CeedBasisGetDimension(basis, &field_dim)); 104 CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 105 *dim = field_dim; 106 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d)); 107 else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d)); 108 CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 109 *Q_1d = field_Q_1d; 110 } 111 CeedCallBackend(CeedBasisDestroy(&basis)); 112 } 113 114 // Only use 3D collocated gradient parallelization strategy when gradient is computed 115 *use_3d_slices = false; 116 if (*dim == 3) { 117 bool was_grad_found = false; 118 119 for (CeedInt i = 0; i < num_input_fields; i++) { 120 CeedEvalMode eval_mode; 121 122 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 123 if (eval_mode == CEED_EVAL_GRAD) { 124 CeedBasis_Hip_shared *basis_data; 125 CeedBasis basis; 126 127 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 128 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 129 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true); 130 was_grad_found = true; 131 CeedCallBackend(CeedBasisDestroy(&basis)); 132 } 133 } 134 for (CeedInt i = 0; i < num_output_fields; i++) { 135 CeedEvalMode eval_mode; 136 137 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 138 if (eval_mode == CEED_EVAL_GRAD) { 139 CeedBasis_Hip_shared *basis_data; 140 CeedBasis basis; 141 142 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 143 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 144 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true); 145 was_grad_found = true; 146 CeedCallBackend(CeedBasisDestroy(&basis)); 147 } 148 } 149 } 150 return CEED_ERROR_SUCCESS; 151 } 152 153 //------------------------------------------------------------------------------ 154 // Setup fields 155 //------------------------------------------------------------------------------ 156 static int CeedOperatorBuildKernelFieldData_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedOperatorField op_field, 157 CeedQFunctionField qf_field, CeedInt field_reuse[3], CeedInt Q_1d, bool is_input, bool is_tensor, 158 bool is_at_points, bool use_3d_slices) { 159 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 160 std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 161 std::string option_name = (is_input ? "inputs" : "outputs"); 162 CeedEvalMode eval_mode = CEED_EVAL_NONE; 163 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 164 CeedElemRestriction elem_rstr; 165 CeedBasis_Hip_shared *basis_data; 166 CeedBasis basis; 167 168 // Field reuse info 169 bool use_previous_field = field_reuse[0] != -1; 170 bool reuse_input = field_reuse[1]; 171 CeedInt reuse_field = field_reuse[0]; 172 CeedEvalMode reuse_mode = (CeedEvalMode)field_reuse[2]; 173 174 code << " // -- " << (is_input ? "Input" : "Output") << " field " << i << "\n"; 175 176 // Get field data 177 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 178 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 179 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 180 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 181 } 182 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 183 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 184 if (basis != CEED_BASIS_NONE) { 185 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 186 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 187 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 188 } 189 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 190 191 // Set field constants 192 if (eval_mode != CEED_EVAL_WEIGHT) { 193 code << " const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n"; 194 code << " const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n"; 195 } 196 197 // Load basis data 198 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 199 switch (eval_mode) { 200 case CEED_EVAL_NONE: 201 break; 202 case CEED_EVAL_INTERP: 203 if (is_at_points) { 204 // AtPoints 205 if (!basis_data->d_chebyshev_interp_1d) { 206 CeedSize interp_bytes; 207 CeedScalar *chebyshev_interp_1d; 208 209 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 210 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 211 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 212 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 213 CeedCallHip(CeedBasisReturnCeed(basis), 214 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 215 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 216 } 217 if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d; 218 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 219 } else { 220 // Standard quadrature 221 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 222 else data->B.outputs[i] = basis_data->d_interp_1d; 223 } 224 if (use_previous_field) { 225 std::string reuse_var = "s_B" + ((reuse_input ? "_in_" : "_out_") + std::to_string(reuse_field)); 226 227 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 228 } else { 229 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 230 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 231 } 232 break; 233 case CEED_EVAL_GRAD: 234 if (is_at_points) { 235 // AtPoints 236 if (!basis_data->d_chebyshev_interp_1d) { 237 CeedSize interp_bytes; 238 CeedScalar *chebyshev_interp_1d; 239 240 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 241 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 242 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 243 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 244 CeedCallHip(CeedBasisReturnCeed(basis), 245 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 246 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 247 } 248 if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d; 249 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 250 } else { 251 // Standard quadrature 252 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 253 else data->B.outputs[i] = basis_data->d_interp_1d; 254 } 255 if (is_tensor) { 256 if (use_previous_field) { 257 std::string reuse_var = "s_B" + ((reuse_input ? "_in_" : "_out_") + std::to_string(reuse_field)); 258 259 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 260 } else { 261 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 262 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 263 } 264 } 265 if (is_at_points) break; // No G mat for AtPoints 266 if (use_3d_slices) { 267 if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d; 268 else data->G.outputs[i] = basis_data->d_collo_grad_1d; 269 if (use_previous_field && reuse_mode == CEED_EVAL_GRAD) { 270 std::string reuse_var = "s_G" + ((reuse_input ? "_in_" : "_out_") + std::to_string(reuse_field)); 271 272 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 273 } else { 274 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 275 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 276 } 277 } else { 278 bool has_collo_grad = basis_data->d_collo_grad_1d; 279 280 if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 281 else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 282 if (has_collo_grad) { 283 if (use_previous_field && reuse_mode == CEED_EVAL_GRAD) { 284 std::string reuse_var = "s_G" + ((reuse_input ? "_in_" : "_out_") + std::to_string(reuse_field)); 285 286 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 287 } else { 288 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 289 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 290 } 291 } else { 292 if (use_previous_field && reuse_mode == CEED_EVAL_GRAD) { 293 std::string reuse_var = "s_G" + ((reuse_input ? "_in_" : "_out_") + std::to_string(reuse_field)); 294 295 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 296 } else { 297 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim") << "];\n"; 298 code << " LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << ">(data, G." << option_name << "[" << i << "], s_G" 299 << var_suffix << ");\n"; 300 } 301 } 302 } 303 break; 304 case CEED_EVAL_WEIGHT: 305 break; // No action 306 // LCOV_EXCL_START 307 case CEED_EVAL_DIV: 308 case CEED_EVAL_CURL: 309 break; // TODO: Not implemented 310 // LCOV_EXCL_STOP 311 } 312 CeedCallBackend(CeedBasisDestroy(&basis)); 313 return CEED_ERROR_SUCCESS; 314 } 315 316 //------------------------------------------------------------------------------ 317 // Restriction 318 //------------------------------------------------------------------------------ 319 static int CeedOperatorBuildKernelRestriction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 320 CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field, 321 CeedInt Q_1d, bool is_input, bool is_tensor, bool is_at_points, bool use_3d_slices) { 322 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 323 std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix; 324 CeedEvalMode eval_mode = CEED_EVAL_NONE; 325 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 326 CeedSize l_size; 327 CeedRestrictionType rstr_type = CEED_RESTRICTION_STANDARD; 328 CeedElemRestriction_Hip *rstr_data; 329 CeedElemRestriction elem_rstr; 330 CeedBasis basis; 331 332 // Get field data 333 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 334 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 335 CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type)); 336 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 337 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 338 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 339 } 340 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 341 if (basis != CEED_BASIS_NONE) { 342 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 343 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 344 } 345 CeedCallBackend(CeedBasisDestroy(&basis)); 346 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 347 348 // Restriction 349 if (is_input) { 350 // Input 351 if (field_input_buffer[i] != i) { 352 std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]); 353 354 // Restriction was already done for previous input 355 code << " CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n"; 356 } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) { 357 if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) { 358 // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated 359 code << " CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n"; 360 } else if (rstr_type != CEED_RESTRICTION_POINTS) { 361 // Otherwise we're using the scratch space 362 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 363 } 364 switch (rstr_type) { 365 case CEED_RESTRICTION_STANDARD: { 366 CeedInt comp_stride; 367 368 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 369 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 370 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 371 code << " // CompStride: " << comp_stride << "\n"; 372 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 373 code << " ReadLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 374 << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix << ");\n"; 375 break; 376 } 377 case CEED_RESTRICTION_STRIDED: { 378 bool has_backend_strides; 379 CeedInt num_elem; 380 381 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 382 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 383 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 384 385 if (!has_backend_strides) { 386 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 387 } 388 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 389 code << " ReadLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 390 << strides[1] << ", " << strides[2] << ">(data, elem, d" << var_suffix << ", r_e" << var_suffix << ");\n"; 391 break; 392 } 393 case CEED_RESTRICTION_POINTS: { 394 CeedInt comp_stride; 395 396 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 397 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 398 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 399 break; 400 } 401 // LCOV_EXCL_START 402 case CEED_RESTRICTION_ORIENTED: 403 case CEED_RESTRICTION_CURL_ORIENTED: 404 break; // TODO: Not implemented 405 // LCOV_EXCL_STOP 406 } 407 } 408 } else { 409 // Output 410 switch (rstr_type) { 411 case CEED_RESTRICTION_STANDARD: { 412 CeedInt comp_stride; 413 414 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 415 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 416 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 417 code << " // CompStride: " << comp_stride << "\n"; 418 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 419 code << " WriteLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 420 << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix << ");\n"; 421 break; 422 } 423 case CEED_RESTRICTION_STRIDED: { 424 bool has_backend_strides; 425 CeedInt num_elem; 426 427 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 428 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 429 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 430 431 if (!has_backend_strides) { 432 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 433 } 434 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 435 code << " WriteLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 436 << strides[1] << ", " << strides[2] << ">(data, elem, r_e" << var_suffix << ", d" << var_suffix << ");\n"; 437 break; 438 } 439 case CEED_RESTRICTION_POINTS: 440 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 441 break; 442 // LCOV_EXCL_START 443 case CEED_RESTRICTION_ORIENTED: 444 case CEED_RESTRICTION_CURL_ORIENTED: 445 break; // TODO: Not implemented 446 // LCOV_EXCL_STOP 447 } 448 } 449 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 450 return CEED_ERROR_SUCCESS; 451 } 452 453 //------------------------------------------------------------------------------ 454 // Basis 455 //------------------------------------------------------------------------------ 456 static int CeedOperatorBuildKernelBasis_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 457 CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt Q_1d, bool is_input, bool is_tensor, 458 bool is_at_points, bool use_3d_slices) { 459 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 460 std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 461 CeedEvalMode eval_mode = CEED_EVAL_NONE; 462 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 463 CeedElemRestriction elem_rstr; 464 CeedBasis basis; 465 466 // Get field data 467 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 468 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 469 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 470 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 471 } 472 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 473 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 474 if (basis != CEED_BASIS_NONE) { 475 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 476 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 477 } 478 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 479 480 // Basis 481 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 482 if (is_input) { 483 switch (eval_mode) { 484 case CEED_EVAL_NONE: 485 if (!use_3d_slices && !is_at_points) { 486 code << " CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n"; 487 } 488 break; 489 case CEED_EVAL_INTERP: 490 if (is_at_points) { 491 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 492 493 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 494 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 495 << var_suffix << ", r_c" << var_suffix << ");\n"; 496 } else { 497 std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d") : "InterpNonTensor"; 498 499 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 500 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 501 << var_suffix << ", r_q" << var_suffix << ");\n"; 502 } 503 break; 504 case CEED_EVAL_GRAD: 505 if (is_at_points) { 506 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 507 508 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 509 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 510 << var_suffix << ", r_c" << var_suffix << ");\n"; 511 } else if (use_3d_slices) { 512 std::string function_name = (dim > 1 ? "InterpTensor" : "Interp") + std::to_string(dim) + "d"; 513 514 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 515 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 516 << var_suffix << ", r_q" << var_suffix << ");\n"; 517 } else if (is_tensor) { 518 bool is_collocated = dim == 3 && Q_1d >= P_1d; 519 std::string function_name = (dim == 1 ? "Grad" : (is_collocated ? "GradTensorCollocated" : "GradTensor")) + std::to_string(dim) + "d"; 520 521 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (dim >= 3 ? Q_name : "1") << "];\n"; 522 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 523 << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 524 } else { 525 std::string function_name = "GradNonTensor"; 526 527 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 528 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix 529 << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 530 } 531 break; 532 case CEED_EVAL_WEIGHT: { 533 if (is_at_points) { 534 code << " // Nothing to do AtPoints\n"; 535 } else { 536 CeedBasis_Hip_shared *basis_data; 537 std::string function_name = is_tensor ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d") : "WeightNonTensor"; 538 539 code << " CeedScalar r_q" << var_suffix << "[" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 540 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 541 data->W = basis_data->d_q_weight_1d; 542 code << " " << function_name << "<" << Q_name << ">(data, W, r_q" << var_suffix << ");\n"; 543 } 544 break; 545 } 546 // LCOV_EXCL_START 547 case CEED_EVAL_DIV: 548 case CEED_EVAL_CURL: 549 break; // TODO: Not implemented 550 // LCOV_EXCL_STOP 551 } 552 } else { 553 switch (eval_mode) { 554 case CEED_EVAL_NONE: 555 code << " CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n"; 556 break; // No action 557 case CEED_EVAL_INTERP: 558 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 559 if (is_at_points) { 560 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 561 562 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_c" << var_suffix << ", s_B" 563 << var_suffix << ", r_e" << var_suffix << ");\n"; 564 } else { 565 std::string function_name = 566 is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d") : "InterpTransposeNonTensor"; 567 568 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 569 << var_suffix << ", r_e" << var_suffix << ");\n"; 570 } 571 break; 572 case CEED_EVAL_GRAD: 573 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 574 if (is_at_points) { 575 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 576 577 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_c" << var_suffix << ", s_B" 578 << var_suffix << ", r_e" << var_suffix << ");\n"; 579 } else if (use_3d_slices) { 580 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 581 582 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 583 << var_suffix << ", r_e" << var_suffix << ");\n"; 584 } else if (is_tensor) { 585 bool is_collocated = dim == 3 && Q_1d >= P_1d; 586 std::string function_name = 587 (dim == 1 ? "GradTranspose" : (is_collocated ? "GradTransposeTensorCollocated" : "GradTransposeTensor")) + std::to_string(dim) + "d"; 588 589 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 590 << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 591 } else { 592 std::string function_name = "GradTransposeNonTensor"; 593 594 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix 595 << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 596 } 597 break; 598 // LCOV_EXCL_START 599 case CEED_EVAL_WEIGHT: 600 break; // Should not occur 601 case CEED_EVAL_DIV: 602 case CEED_EVAL_CURL: 603 break; // TODO: Not implemented 604 // LCOV_EXCL_STOP 605 } 606 } 607 CeedCallBackend(CeedBasisDestroy(&basis)); 608 return CEED_ERROR_SUCCESS; 609 } 610 611 //------------------------------------------------------------------------------ 612 // QFunction 613 //------------------------------------------------------------------------------ 614 static int CeedOperatorBuildKernelQFunction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt dim, CeedInt max_num_points, 615 CeedInt num_input_fields, CeedOperatorField *op_input_fields, CeedQFunctionField *qf_input_fields, 616 CeedInt num_output_fields, CeedOperatorField *op_output_fields, 617 CeedQFunctionField *qf_output_fields, std::string qfunction_name, CeedInt Q_1d, bool is_tensor, 618 bool is_at_points, bool use_3d_slices) { 619 std::string Q_name = is_tensor ? "Q_1d" : "Q"; 620 CeedEvalMode eval_mode = CEED_EVAL_NONE; 621 CeedElemRestriction elem_rstr; 622 623 // Setup output arrays 624 code << "\n // -- Output field setup\n"; 625 for (CeedInt i = 0; i < num_output_fields; i++) { 626 std::string var_suffix = "_out_" + std::to_string(i); 627 628 code << " // ---- Output field " << i << "\n"; 629 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 630 switch (eval_mode) { 631 case CEED_EVAL_NONE: 632 if (is_at_points) { 633 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n"; 634 } else { 635 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 636 } 637 break; 638 case CEED_EVAL_INTERP: 639 if (is_at_points) { 640 // Accumulator for point data 641 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 642 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n"; 643 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 644 code << " }\n"; 645 } else { 646 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 647 } 648 break; 649 case CEED_EVAL_GRAD: 650 if (is_at_points) { 651 // Accumulator for point data 652 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "*dim];\n"; 653 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n"; 654 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 655 code << " }\n"; 656 } else if (use_3d_slices) { 657 // Accumulator for gradient slices 658 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 659 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) {\n"; 660 code << " r_q" << var_suffix << "[i] = 0.0;\n"; 661 code << " }\n"; 662 } else { 663 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 664 } 665 break; 666 case CEED_EVAL_WEIGHT: 667 break; 668 // LCOV_EXCL_START 669 case CEED_EVAL_DIV: 670 case CEED_EVAL_CURL: 671 break; // TODO: Not implemented 672 // LCOV_EXCL_STOP 673 } 674 } 675 676 if (is_at_points) { 677 // We need to handle batches of points 678 code << "\n // Note: Using batches of points\n"; 679 code << " const CeedInt point_loop_bound = (blockDim.x * blockDim.y) * ceil(1.0 * max_num_points / (blockDim.x * blockDim.y));\n\n"; 680 code << " #pragma unroll\n"; 681 code << " for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < point_loop_bound; i += blockDim.x * blockDim.y) {\n"; 682 code << " const CeedInt p = i % max_num_points;\n\n"; 683 684 code << " // -- Coordinates\n"; 685 code << " CeedScalar r_x[dim];\n"; 686 code << " ReadPoint<dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n"; 687 688 code << " // -- Input fields\n"; 689 for (CeedInt i = 0; i < num_input_fields; i++) { 690 std::string var_suffix = "_in_" + std::to_string(i); 691 692 code << " // ---- Input field " << i << "\n"; 693 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 694 // Basis action 695 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 696 switch (eval_mode) { 697 case CEED_EVAL_NONE: 698 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 699 code << " ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 700 << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 701 break; 702 case CEED_EVAL_INTERP: 703 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 704 code << " InterpAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_c" << var_suffix 705 << ", r_x, r_s" << var_suffix << ");\n"; 706 break; 707 case CEED_EVAL_GRAD: 708 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 709 code << " GradAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_c" << var_suffix 710 << ", r_x, r_s" << var_suffix << ");\n"; 711 break; 712 case CEED_EVAL_WEIGHT: 713 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 714 code << " r_s" << var_suffix << "[0] = 1.0;\n"; 715 break; 716 // LCOV_EXCL_START 717 case CEED_EVAL_DIV: 718 case CEED_EVAL_CURL: 719 break; // TODO: Not implemented 720 // LCOV_EXCL_STOP 721 } 722 } 723 code << "\n // -- Output fields\n"; 724 for (CeedInt i = 0; i < num_output_fields; i++) { 725 std::string var_suffix = "_out_" + std::to_string(i); 726 727 code << " // ---- Output field " << i << "\n"; 728 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 729 // Basis action 730 switch (eval_mode) { 731 case CEED_EVAL_NONE: 732 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 733 break; 734 case CEED_EVAL_INTERP: 735 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 736 break; 737 case CEED_EVAL_GRAD: 738 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 739 break; 740 // LCOV_EXCL_START 741 case CEED_EVAL_WEIGHT: 742 break; // Should not occur 743 case CEED_EVAL_DIV: 744 case CEED_EVAL_CURL: 745 break; // TODO: Not implemented 746 // LCOV_EXCL_STOP 747 } 748 } 749 750 } else if (use_3d_slices) { 751 // We treat quadrature points per slice in 3d to save registers 752 code << "\n // Note: Using planes of 3D elements\n"; 753 code << " #pragma unroll\n"; 754 code << " for (CeedInt q = 0; q < " << Q_name << "; q++) {\n"; 755 code << " // -- Input fields\n"; 756 for (CeedInt i = 0; i < num_input_fields; i++) { 757 std::string var_suffix = "_in_" + std::to_string(i); 758 759 code << " // ---- Input field " << i << "\n"; 760 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 761 // Basis action 762 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 763 switch (eval_mode) { 764 case CEED_EVAL_NONE: 765 bool is_strided; 766 767 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 768 769 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 770 CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided)); 771 if (is_strided) { 772 bool has_backend_strides; 773 CeedInt num_elem, elem_size; 774 775 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 776 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 777 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 778 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 779 780 if (!has_backend_strides) { 781 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 782 } 783 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 784 code << " ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", " << strides[0] << ", " << strides[1] << ", " 785 << strides[2] << ">(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n"; 786 } else { 787 CeedSize l_size = 0; 788 CeedInt comp_stride; 789 CeedElemRestriction_Hip *rstr_data; 790 791 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 792 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 793 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 794 code << " // CompStride: " << comp_stride << "\n"; 795 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 796 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 797 code << " ReadEVecSliceStandard3d<num_comp" << var_suffix << ", " << comp_stride << ", " << Q_name << ">(data, l_size" << var_suffix 798 << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 799 } 800 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 801 break; 802 case CEED_EVAL_INTERP: 803 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 804 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n"; 805 code << " r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n"; 806 code << " }\n"; 807 break; 808 case CEED_EVAL_GRAD: 809 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 810 code << " GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ">(data, q, r_q" << var_suffix << ", s_G" << var_suffix 811 << ", r_s" << var_suffix << ");\n"; 812 break; 813 case CEED_EVAL_WEIGHT: 814 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 815 code << " r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n"; 816 break; 817 // LCOV_EXCL_START 818 case CEED_EVAL_DIV: 819 case CEED_EVAL_CURL: 820 break; // TODO: Not implemented 821 // LCOV_EXCL_STOP 822 } 823 } 824 code << "\n // -- Output fields\n"; 825 for (CeedInt i = 0; i < num_output_fields; i++) { 826 std::string var_suffix = "_out_" + std::to_string(i); 827 828 code << " // ---- Output field " << i << "\n"; 829 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 830 // Basis action 831 switch (eval_mode) { 832 case CEED_EVAL_NONE: 833 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 834 break; 835 case CEED_EVAL_INTERP: 836 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 837 break; 838 case CEED_EVAL_GRAD: 839 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 840 break; 841 // LCOV_EXCL_START 842 case CEED_EVAL_WEIGHT: 843 break; // Should not occur 844 case CEED_EVAL_DIV: 845 case CEED_EVAL_CURL: 846 break; // TODO: Not implemented 847 // LCOV_EXCL_STOP 848 } 849 } 850 } else { 851 code << "\n // Note: Using full elements\n"; 852 code << " {\n"; 853 code << " // -- Input fields\n"; 854 for (CeedInt i = 0; i < num_input_fields; i++) { 855 code << " // ---- Input field " << i << "\n"; 856 code << " CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n"; 857 } 858 code << " // -- Output fields\n"; 859 for (CeedInt i = 0; i < num_output_fields; i++) { 860 code << " // ---- Output field " << i << "\n"; 861 code << " CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n"; 862 } 863 } 864 865 // Input and output buffers 866 code << "\n // -- QFunction inputs and outputs\n"; 867 code << " // ---- Inputs\n"; 868 code << " CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n"; 869 for (CeedInt i = 0; i < num_input_fields; i++) { 870 code << " // ------ Input field " << i << "\n"; 871 code << " inputs[" << i << "] = r_s_in_" << i << ";\n"; 872 } 873 code << " // ---- Outputs\n"; 874 code << " CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n"; 875 for (CeedInt i = 0; i < num_output_fields; i++) { 876 code << " // ------ Output field " << i << "\n"; 877 code << " outputs[" << i << "] = r_s_out_" << i << ";\n"; 878 } 879 880 // Apply QFunction 881 code << "\n // -- Apply QFunction\n"; 882 code << " " << qfunction_name << "(ctx, "; 883 if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) { 884 code << "1"; 885 } else { 886 code << Q_name; 887 } 888 code << ", inputs, outputs);\n"; 889 890 if (is_at_points) { 891 // Map back to coefficients 892 code << "\n // -- Output fields\n"; 893 for (CeedInt i = 0; i < num_output_fields; i++) { 894 std::string var_suffix = "_out_" + std::to_string(i); 895 std::string P_name = "P_1d" + var_suffix; 896 897 code << " // ---- Output field " << i << "\n"; 898 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 899 // Basis action 900 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 901 switch (eval_mode) { 902 case CEED_EVAL_NONE: { 903 CeedInt comp_stride; 904 CeedElemRestriction elem_rstr; 905 906 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 907 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 908 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 909 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 910 code << " WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 911 << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]" 912 << ", r_s" << var_suffix << ", d" << var_suffix << ");\n"; 913 break; 914 } 915 case CEED_EVAL_INTERP: 916 code << " if (i >= points.num_per_elem[elem]) {\n"; 917 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 918 code << " }\n"; 919 code << " InterpTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_s" 920 << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 921 break; 922 case CEED_EVAL_GRAD: 923 code << " if (i >= points.num_per_elem[elem]) {\n"; 924 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 925 code << " }\n"; 926 code << " GradTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_s" 927 << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 928 break; 929 // LCOV_EXCL_START 930 case CEED_EVAL_WEIGHT: 931 break; // Should not occur 932 case CEED_EVAL_DIV: 933 case CEED_EVAL_CURL: 934 break; // TODO: Not implemented 935 // LCOV_EXCL_STOP 936 } 937 } 938 } else if (use_3d_slices) { 939 // Copy or apply transpose grad, if needed 940 code << "\n // -- Output fields\n"; 941 for (CeedInt i = 0; i < num_output_fields; i++) { 942 std::string var_suffix = "_out_" + std::to_string(i); 943 std::string P_name = "P_1d" + var_suffix; 944 945 code << " // ---- Output field " << i << "\n"; 946 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 947 // Basis action 948 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 949 switch (eval_mode) { 950 case CEED_EVAL_NONE: 951 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n"; 952 code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n"; 953 code << " }\n"; 954 break; 955 case CEED_EVAL_INTERP: 956 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n"; 957 code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n"; 958 code << " }\n"; 959 break; 960 case CEED_EVAL_GRAD: 961 code << " GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ">(data, q, r_s" << var_suffix << ", s_G" 962 << var_suffix << ", r_q" << var_suffix << ");\n"; 963 break; 964 // LCOV_EXCL_START 965 case CEED_EVAL_WEIGHT: 966 break; // Should not occur 967 case CEED_EVAL_DIV: 968 case CEED_EVAL_CURL: 969 break; // TODO: Not implemented 970 // LCOV_EXCL_STOP 971 } 972 } 973 } 974 code << " }\n"; 975 return CEED_ERROR_SUCCESS; 976 } 977 978 //------------------------------------------------------------------------------ 979 // Build single operator kernel 980 //------------------------------------------------------------------------------ 981 extern "C" int CeedOperatorBuildKernel_Hip_gen(CeedOperator op, bool *is_good_build) { 982 bool is_tensor = true, is_at_points = false, use_3d_slices = false; 983 Ceed ceed; 984 CeedInt Q_1d, num_input_fields, num_output_fields, dim = 1, max_num_points = 0, coords_comp_stride = 0; 985 CeedQFunctionField *qf_input_fields, *qf_output_fields; 986 CeedQFunction_Hip_gen *qf_data; 987 CeedQFunction qf; 988 CeedOperatorField *op_input_fields, *op_output_fields; 989 CeedOperator_Hip_gen *data; 990 std::ostringstream code; 991 992 CeedCallBackend(CeedOperatorGetData(op, &data)); 993 { 994 bool is_setup_done; 995 996 CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done)); 997 if (is_setup_done) { 998 *is_good_build = !data->use_fallback; 999 return CEED_ERROR_SUCCESS; 1000 } 1001 } 1002 1003 // Check field compatibility 1004 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 1005 { 1006 bool has_shared_bases = true, is_all_tensor = true, is_all_nontensor = true; 1007 1008 for (CeedInt i = 0; i < num_input_fields; i++) { 1009 CeedBasis basis; 1010 1011 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 1012 if (basis != CEED_BASIS_NONE) { 1013 bool is_tensor = true; 1014 const char *resource; 1015 char *resource_root; 1016 Ceed basis_ceed; 1017 1018 CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor)); 1019 is_all_tensor = is_all_tensor && is_tensor; 1020 is_all_nontensor = is_all_nontensor && !is_tensor; 1021 CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed)); 1022 CeedCallBackend(CeedGetResource(basis_ceed, &resource)); 1023 CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root)); 1024 has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared"); 1025 CeedCallBackend(CeedFree(&resource_root)); 1026 CeedCallBackend(CeedDestroy(&basis_ceed)); 1027 } 1028 CeedCallBackend(CeedBasisDestroy(&basis)); 1029 } 1030 1031 for (CeedInt i = 0; i < num_output_fields; i++) { 1032 CeedBasis basis; 1033 1034 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 1035 if (basis != CEED_BASIS_NONE) { 1036 bool is_tensor = true; 1037 const char *resource; 1038 char *resource_root; 1039 Ceed basis_ceed; 1040 1041 CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor)); 1042 is_all_tensor = is_all_tensor && is_tensor; 1043 is_all_nontensor = is_all_nontensor && !is_tensor; 1044 1045 CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed)); 1046 CeedCallBackend(CeedGetResource(basis_ceed, &resource)); 1047 CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root)); 1048 has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared"); 1049 CeedCallBackend(CeedFree(&resource_root)); 1050 CeedCallBackend(CeedDestroy(&basis_ceed)); 1051 } 1052 CeedCallBackend(CeedBasisDestroy(&basis)); 1053 } 1054 // -- Fallback to ref if not all bases are shared 1055 if (!has_shared_bases || (!is_all_tensor && !is_all_nontensor)) { 1056 *is_good_build = false; 1057 return CEED_ERROR_SUCCESS; 1058 } 1059 } 1060 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1061 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 1062 CeedCallBackend(CeedQFunctionGetData(qf, &qf_data)); 1063 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 1064 1065 // Get operator data 1066 CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points)); 1067 CeedCallBackend(CeedOperatorBuildKernelData_Hip_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, op_output_fields, 1068 qf_output_fields, &data->max_P_1d, &Q_1d, &dim, &is_tensor, &use_3d_slices)); 1069 if (dim == 0) dim = 1; 1070 data->dim = dim; 1071 if (is_at_points) { 1072 CeedElemRestriction_Hip *rstr_data; 1073 CeedElemRestriction rstr_points = NULL; 1074 1075 CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL)); 1076 CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points)); 1077 CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride)); 1078 CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data)); 1079 data->points.indices = (CeedInt *)rstr_data->d_offsets; 1080 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points)); 1081 } 1082 if (is_at_points) use_3d_slices = false; 1083 if (Q_1d == 0) { 1084 if (is_at_points) Q_1d = max_num_points; 1085 else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d)); 1086 } 1087 data->Q_1d = Q_1d; 1088 1089 // Check for restriction only identity operator 1090 { 1091 bool is_identity_qf; 1092 1093 CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf)); 1094 if (is_identity_qf) { 1095 CeedEvalMode eval_mode_in, eval_mode_out; 1096 1097 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in)); 1098 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out)); 1099 CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND, 1100 "Backend does not implement restriction only identity operators"); 1101 } 1102 } 1103 1104 // Load basis source files 1105 if (is_tensor) { 1106 code << "// Tensor basis source\n"; 1107 code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-templates.h>\n\n"; 1108 } else { 1109 code << "// Non-tensor basis source\n"; 1110 code << "#include <ceed/jit-source/hip/hip-shared-basis-nontensor-templates.h>\n\n"; 1111 } 1112 if (is_at_points) { 1113 code << "// AtPoints basis source\n"; 1114 code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-at-points-templates.h>\n\n"; 1115 } 1116 code << "// CodeGen operator source\n"; 1117 code << "#include <ceed/jit-source/hip/hip-gen-templates.h>\n\n"; 1118 1119 // Get QFunction name 1120 std::string qfunction_name(qf_data->qfunction_name); 1121 std::string operator_name; 1122 1123 operator_name = "CeedKernelHipGenOperator_" + qfunction_name; 1124 1125 // Define CEED_Q_VLA 1126 code << "\n#undef CEED_Q_VLA\n"; 1127 if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) { 1128 code << "#define CEED_Q_VLA 1\n\n"; 1129 } else { 1130 code << "#define CEED_Q_VLA " << Q_1d << "\n\n"; 1131 } 1132 1133 // Add user QFunction source 1134 { 1135 const char *source_path; 1136 1137 CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path)); 1138 CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/hip/gen backend requires QFunction source code file"); 1139 1140 code << "// User QFunction source\n"; 1141 code << "#include \"" << source_path << "\"\n\n"; 1142 } 1143 1144 // Setup 1145 code << "\n// -----------------------------------------------------------------------------\n"; 1146 code << "// Operator Kernel\n"; 1147 code << "// \n"; 1148 code << "// d_[in,out]_i: CeedVector device array\n"; 1149 code << "// r_[in,out]_e_i: Element vector register\n"; 1150 code << "// r_[in,out]_q_i: Quadrature space vector register\n"; 1151 code << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n"; 1152 code << "// r_[in,out]_s_i: Quadrature space slice vector register\n"; 1153 code << "// \n"; 1154 code << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n"; 1155 code << "// s_G_[in,out]_i: Gradient matrix, shared memory\n"; 1156 code << "// -----------------------------------------------------------------------------\n"; 1157 code << "\nextern \"C\" __launch_bounds__(BLOCK_SIZE)\n"; 1158 code << "__global__ void " << operator_name 1159 << "(CeedInt num_elem, void* ctx, FieldsInt_Hip indices, Fields_Hip fields, Fields_Hip B, Fields_Hip G, CeedScalar* W, Points_Hip points) {\n"; 1160 1161 // Scratch buffers 1162 for (CeedInt i = 0; i < num_input_fields; i++) { 1163 CeedEvalMode eval_mode; 1164 1165 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 1166 if (eval_mode != CEED_EVAL_WEIGHT) { // Skip CEED_EVAL_WEIGHT 1167 code << " const CeedScalar *d_in_" << i << " = fields.inputs[" << i << "];\n"; 1168 } 1169 } 1170 for (CeedInt i = 0; i < num_output_fields; i++) { 1171 code << " CeedScalar *d_out_" << i << " = fields.outputs[" << i << "];\n"; 1172 } 1173 1174 code << " const CeedInt dim = " << dim << ";\n"; 1175 code << " const CeedInt " << (is_tensor ? "Q_1d" : "Q") << " = " << Q_1d << ";\n"; 1176 if (is_at_points) { 1177 code << " const CeedInt max_num_points = " << max_num_points << ";\n"; 1178 code << " const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n"; 1179 } 1180 1181 // Shared data 1182 code << " extern __shared__ CeedScalar slice[];\n"; 1183 code << " SharedData_Hip data;\n"; 1184 code << " data.t_id_x = threadIdx.x;\n"; 1185 code << " data.t_id_y = threadIdx.y;\n"; 1186 code << " data.t_id_z = threadIdx.z;\n"; 1187 code << " data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n"; 1188 code << " data.slice = slice + data.t_id_z*T_1D" << ((!is_tensor || dim == 1) ? "" : "*T_1D") << ";\n"; 1189 1190 // -- Determine input mat reuse 1191 CeedInt input_matrix_reuse[CEED_FIELD_MAX][3]; // field, is_input, eval_mode 1192 1193 for (CeedInt i = 0; i < num_input_fields; i++) { 1194 input_matrix_reuse[i][0] = -1; 1195 } 1196 for (CeedInt i = 0; i < num_input_fields; i++) { 1197 CeedEvalMode eval_mode_i; 1198 CeedBasis basis_i; 1199 1200 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i)); 1201 if (eval_mode_i == CEED_EVAL_WEIGHT) continue; 1202 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i)); 1203 for (CeedInt j = 0; (input_matrix_reuse[i][0] == -1) && (j < i); j++) { 1204 CeedEvalMode eval_mode_j; 1205 CeedBasis basis_j; 1206 1207 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 1208 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1209 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 1210 if (basis_i == basis_j) { 1211 if (is_tensor) { 1212 input_matrix_reuse[i][0] = j; 1213 input_matrix_reuse[i][1] = true; 1214 input_matrix_reuse[i][2] = eval_mode_j; 1215 } else { 1216 // For non-tensor can only re-use with the same eval mode 1217 if (eval_mode_i == eval_mode_j) { 1218 input_matrix_reuse[i][0] = j; 1219 input_matrix_reuse[i][1] = true; 1220 input_matrix_reuse[i][2] = eval_mode_j; 1221 } 1222 } 1223 } 1224 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1225 } 1226 CeedCallBackend(CeedBasisDestroy(&basis_i)); 1227 } 1228 1229 // -- Determine output mat reuse 1230 CeedInt output_matrix_reuse[CEED_FIELD_MAX][3]; // field, is_input, eval_mode 1231 1232 for (CeedInt i = 0; i < num_output_fields; i++) { 1233 output_matrix_reuse[i][0] = -1; 1234 } 1235 for (CeedInt i = 0; i < num_output_fields; i++) { 1236 CeedEvalMode eval_mode_i; 1237 CeedBasis basis_i; 1238 1239 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i)); 1240 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i)); 1241 for (CeedInt j = 0; (output_matrix_reuse[i][0] == -1) && (j < num_input_fields); j++) { 1242 CeedEvalMode eval_mode_j; 1243 CeedBasis basis_j; 1244 1245 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j)); 1246 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1247 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j)); 1248 if (basis_i == basis_j) { 1249 if (is_tensor) { 1250 output_matrix_reuse[i][0] = j; 1251 output_matrix_reuse[i][1] = true; 1252 output_matrix_reuse[i][2] = eval_mode_j; 1253 } else { 1254 // For non-tensor can only re-use with the same eval mode 1255 if (eval_mode_i == eval_mode_j) { 1256 output_matrix_reuse[i][0] = j; 1257 output_matrix_reuse[i][1] = true; 1258 output_matrix_reuse[i][2] = eval_mode_j; 1259 } 1260 } 1261 } 1262 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1263 } 1264 for (CeedInt j = 0; (output_matrix_reuse[i][0] == -1) && (j < i); j++) { 1265 CeedEvalMode eval_mode_j; 1266 CeedBasis basis_j; 1267 1268 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j)); 1269 if (eval_mode_j == CEED_EVAL_WEIGHT) continue; 1270 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j)); 1271 if (basis_i == basis_j) { 1272 if (is_tensor) { 1273 output_matrix_reuse[i][0] = j; 1274 output_matrix_reuse[i][1] = false; 1275 output_matrix_reuse[i][2] = eval_mode_j; 1276 } else { 1277 // For non-tensor can only re-use with the same eval mode 1278 if (eval_mode_i == eval_mode_j) { 1279 output_matrix_reuse[i][0] = j; 1280 output_matrix_reuse[i][1] = false; 1281 output_matrix_reuse[i][2] = eval_mode_j; 1282 } 1283 } 1284 } 1285 CeedCallBackend(CeedBasisDestroy(&basis_j)); 1286 } 1287 CeedCallBackend(CeedBasisDestroy(&basis_i)); 1288 } 1289 1290 // Initialize constants, and matrices B and G 1291 code << "\n // Input field constants and basis data\n"; 1292 for (CeedInt i = 0; i < num_input_fields; i++) { 1293 CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], Q_1d, true, 1294 is_tensor, is_at_points, use_3d_slices)); 1295 } 1296 code << "\n // Output field constants and basis data\n"; 1297 for (CeedInt i = 0; i < num_output_fields; i++) { 1298 CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], Q_1d, 1299 false, is_tensor, is_at_points, use_3d_slices)); 1300 } 1301 1302 // Loop over all elements 1303 code << "\n // Element loop\n"; 1304 code << " __syncthreads();\n"; 1305 code << " for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n"; 1306 1307 // -- Compute minimum buffer space needed 1308 CeedInt max_rstr_buffer_size = 1; 1309 1310 for (CeedInt i = 0; i < num_input_fields; i++) { 1311 CeedInt num_comp, elem_size; 1312 CeedElemRestriction elem_rstr; 1313 1314 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 1315 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 1316 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 1317 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1)); 1318 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1319 } 1320 for (CeedInt i = 0; i < num_output_fields; i++) { 1321 CeedInt num_comp, elem_size; 1322 CeedElemRestriction elem_rstr; 1323 1324 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 1325 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 1326 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 1327 max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1)); 1328 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1329 } 1330 code << " // Scratch restriction buffer space\n"; 1331 code << " CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n"; 1332 1333 // -- Determine best input field processing order 1334 CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX]; 1335 1336 for (CeedInt i = 0; i < num_input_fields; i++) { 1337 field_rstr_in_buffer[i] = -1; 1338 input_field_order[i] = -1; 1339 } 1340 { 1341 bool is_ordered[CEED_FIELD_MAX]; 1342 CeedInt curr_index = 0; 1343 1344 for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false; 1345 for (CeedInt i = 0; i < num_input_fields; i++) { 1346 CeedVector vec_i; 1347 CeedElemRestriction rstr_i; 1348 1349 if (is_ordered[i]) continue; 1350 field_rstr_in_buffer[i] = i; 1351 is_ordered[i] = true; 1352 input_field_order[curr_index] = i; 1353 curr_index++; 1354 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i)); 1355 if (vec_i == CEED_VECTOR_NONE) continue; // CEED_EVAL_WEIGHT 1356 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i)); 1357 for (CeedInt j = i + 1; j < num_input_fields; j++) { 1358 CeedVector vec_j; 1359 CeedElemRestriction rstr_j; 1360 1361 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j)); 1362 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j)); 1363 if (rstr_i == rstr_j && vec_i == vec_j) { 1364 field_rstr_in_buffer[j] = i; 1365 is_ordered[j] = true; 1366 input_field_order[curr_index] = j; 1367 curr_index++; 1368 } 1369 CeedCallBackend(CeedVectorDestroy(&vec_j)); 1370 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j)); 1371 } 1372 CeedCallBackend(CeedVectorDestroy(&vec_i)); 1373 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i)); 1374 } 1375 } 1376 1377 // -- Input restriction and basis 1378 code << "\n // -- Input field restrictions and basis actions\n"; 1379 for (CeedInt i = 0; i < num_input_fields; i++) { 1380 CeedInt f = input_field_order[i]; 1381 1382 code << " // ---- Input field " << f << "\n"; 1383 1384 // ---- Restriction 1385 CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, f, dim, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], Q_1d, 1386 true, is_tensor, is_at_points, use_3d_slices)); 1387 1388 // ---- Basis action 1389 CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, f, dim, op_input_fields[f], qf_input_fields[f], Q_1d, true, is_tensor, 1390 is_at_points, use_3d_slices)); 1391 } 1392 1393 // -- Q function 1394 CeedCallBackend(CeedOperatorBuildKernelQFunction_Hip_gen(code, data, dim, max_num_points, num_input_fields, op_input_fields, qf_input_fields, 1395 num_output_fields, op_output_fields, qf_output_fields, qfunction_name, Q_1d, is_tensor, 1396 is_at_points, use_3d_slices)); 1397 1398 // -- Output basis and restriction 1399 code << "\n // -- Output field basis action and restrictions\n"; 1400 for (CeedInt i = 0; i < num_output_fields; i++) { 1401 code << " // ---- Output field " << i << "\n"; 1402 1403 // ---- Basis action 1404 CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, i, dim, op_output_fields[i], qf_output_fields[i], Q_1d, false, is_tensor, 1405 is_at_points, use_3d_slices)); 1406 1407 // ---- Restriction 1408 CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, i, dim, NULL, op_output_fields[i], qf_output_fields[i], Q_1d, false, 1409 is_tensor, is_at_points, use_3d_slices)); 1410 } 1411 1412 // Close loop and function 1413 code << " }\n"; 1414 code << "}\n"; 1415 code << "// -----------------------------------------------------------------------------\n\n"; 1416 1417 CeedInt block_sizes[3] = {0, 0, 0}; 1418 CeedInt num_elem; 1419 1420 // Compile 1421 CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem)); 1422 CeedCallBackend(BlockGridCalculate_Hip_gen(is_tensor ? dim : 1, num_elem, data->max_P_1d, Q_1d, block_sizes)); 1423 { 1424 bool is_compile_good = false; 1425 1426 CeedCallBackend(CeedTryCompile_Hip(ceed, code.str().c_str(), &is_compile_good, &data->module, 2, "T_1D", block_sizes[0], "BLOCK_SIZE", 1427 block_sizes[0] * block_sizes[1] * block_sizes[2])); 1428 if (is_compile_good) { 1429 *is_good_build = true; 1430 CeedCallBackend(CeedGetKernel_Hip(ceed, data->module, operator_name.c_str(), &data->op)); 1431 } else { 1432 *is_good_build = false; 1433 data->use_fallback = true; 1434 } 1435 } 1436 CeedCallBackend(CeedOperatorSetSetupDone(op)); 1437 CeedCallBackend(CeedDestroy(&ceed)); 1438 CeedCallBackend(CeedQFunctionDestroy(&qf)); 1439 return CEED_ERROR_SUCCESS; 1440 } 1441 1442 //------------------------------------------------------------------------------ 1443