// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#define CEED_DEBUG_COLOR 12

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>

#include <iostream>
#include <sstream>
#include <string>

#include "../hip-ref/ceed-hip-ref.h"
#include "../hip-shared/ceed-hip-shared.h"
#include "../hip/ceed-hip-common.h"
#include "../hip/ceed-hip-compile.h"
#include "ceed-hip-gen.h"

// Record of a previously processed field whose shared-memory basis matrices
// may be aliased by a later field instead of loading fresh copies.
// index == -1 means "no field to reuse" (see use_previous_field below).
struct FieldReuse_Hip {
  CeedInt index;
  bool is_input;
  CeedEvalMode eval_mode;
};

//------------------------------------------------------------------------------
// Calculate the block size used for launching the operator kernel
//
// block_sizes[0] and block_sizes[1] are the x/y thread extents and
// block_sizes[2] is the number of elements batched per thread block.
// Note: num_elem is currently unused by this heuristic.
//------------------------------------------------------------------------------
extern "C" int BlockGridCalculate_Hip_gen(const CeedInt dim, const CeedInt num_elem, const CeedInt P_1d, const CeedInt Q_1d, CeedInt *block_sizes) {
  // Threads per direction must cover both nodes and quadrature points
  const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
  if (dim == 1) {
    // Cap thread blocks at 256 threads; otherwise batch 64 elements per block
    CeedInt elems_per_block = 64 * thread_1d > 256 ? 256 / thread_1d : 64;

    // Guard against thread_1d > 256 rounding the division down to zero
    elems_per_block = elems_per_block > 0 ? elems_per_block : 1;
    block_sizes[0] = thread_1d;
    block_sizes[1] = 1;
    block_sizes[2] = elems_per_block;
  } else if (dim == 2) {
    const CeedInt elems_per_block = thread_1d < 4 ? 16 : 2;

    block_sizes[0] = thread_1d;
    block_sizes[1] = thread_1d;
    block_sizes[2] = elems_per_block;
  } else if (dim == 3) {
    const CeedInt elems_per_block = thread_1d < 6 ? 4 : (thread_1d < 8 ? 2 : 1);

    block_sizes[0] = thread_1d;
    block_sizes[1] = thread_1d;
    block_sizes[2] = elems_per_block;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Determine type of operator
//
// Scans every input and output field basis to determine the common dimension
// (*dim), the maximum 1D node count (*max_P_1d), and the common 1D quadrature
// point count (*Q_1d). *is_tensor is true only if every basis is a
// tensor-product basis. Mismatched dims or quadrature sizes across fields are
// reported as a backend error. *use_3d_slices (set in the second half of this
// function) selects the 3D collocated-gradient parallelization strategy.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelData_Hip_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields,
                                               CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields,
                                               CeedQFunctionField *qf_output_fields, CeedInt *max_P_1d, CeedInt *Q_1d, CeedInt *dim, bool *is_tensor,
                                               bool *use_3d_slices) {
  // Find dim, P_1d, Q_1d
  *max_P_1d = 0;
  *Q_1d = 0;
  *dim = 0;
  *is_tensor = true;

  // Inputs
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;
      CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0;

      // Collect dim, P_1d, and Q_1d
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_tensor = *is_tensor && is_field_tensor;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
      else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d));
      *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *dim = field_dim;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
      else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d));
      CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q_1d = field_Q_1d;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }
  // Outputs (same collection and compatibility checks)
  for
(CeedInt i = 0; i < num_output_fields; i++) {
    CeedBasis basis;

    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_field_tensor;
      CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0;

      // Collect dim, P_1d, and Q_1d
      CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor));
      *is_tensor = *is_tensor && is_field_tensor;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d));
      else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d));
      *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d);
      CeedCallBackend(CeedBasisGetDimension(basis, &field_dim));
      CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *dim = field_dim;
      if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d));
      else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d));
      CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible");
      *Q_1d = field_Q_1d;
    }
    CeedCallBackend(CeedBasisDestroy(&basis));
  }

  // Only use 3D collocated gradient parallelization strategy when gradient is computed
  // The flag ends up true only if *every* CEED_EVAL_GRAD field (input or
  // output) has a collocated 1D gradient matrix on the device.
  *use_3d_slices = false;
  if (*dim == 3) {
    bool was_grad_found = false;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Hip_shared *basis_data;
        CeedBasis basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        // First gradient field initializes the flag; later ones AND into it
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedEvalMode eval_mode;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedBasis_Hip_shared *basis_data;
        CeedBasis basis;

        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true);
        was_grad_found = true;
        CeedCallBackend(CeedBasisDestroy(&basis));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup fields
//
// Emits the per-field constants (P, num_comp) and the shared-memory basis
// matrix declarations/loads for field i into the generated kernel source in
// `code`, and records the matching device matrix pointers in `data`. For
// AtPoints operators the Chebyshev interpolation matrix is built and uploaded
// lazily. When `field_reuse` names a compatible earlier field, its
// shared-memory matrices are aliased instead of loading new copies.
//------------------------------------------------------------------------------
static int CeedOperatorBuildKernelFieldData_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedOperatorField op_field,
                                                    CeedQFunctionField qf_field, FieldReuse_Hip field_reuse, CeedInt Q_1d, bool is_input,
                                                    bool is_tensor, bool is_at_points, bool use_3d_slices) {
  std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i);
  std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q";
  std::string option_name = (is_input ? "inputs" : "outputs");
  CeedEvalMode eval_mode = CEED_EVAL_NONE;
  CeedInt elem_size = 0, num_comp = 0, P_1d = 0;
  CeedElemRestriction elem_rstr;
  CeedBasis_Hip_shared *basis_data;
  CeedBasis basis;

  // Field reuse info
  bool use_previous_field = field_reuse.index != -1;

  code << " // -- " << (is_input ?
"Input" : "Output") << " field " << i << "\n"; 178 179 // Get field data 180 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 181 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 182 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 183 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 184 } 185 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 186 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 187 if (basis != CEED_BASIS_NONE) { 188 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 189 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 190 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 191 } 192 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 193 194 // Set field constants 195 if (eval_mode != CEED_EVAL_WEIGHT) { 196 code << " const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n"; 197 code << " const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n"; 198 } 199 200 // Load basis data 201 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 202 switch (eval_mode) { 203 case CEED_EVAL_NONE: 204 break; 205 case CEED_EVAL_INTERP: 206 if (is_at_points) { 207 // AtPoints 208 if (!basis_data->d_chebyshev_interp_1d) { 209 CeedSize interp_bytes; 210 CeedScalar *chebyshev_interp_1d; 211 212 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 213 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 214 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 215 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 216 CeedCallHip(CeedBasisReturnCeed(basis), 217 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 218 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 219 } 220 if (is_input) data->B.inputs[i] = 
basis_data->d_chebyshev_interp_1d; 221 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 222 } else { 223 // Standard quadrature 224 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 225 else data->B.outputs[i] = basis_data->d_interp_1d; 226 } 227 if (use_previous_field) { 228 std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 229 230 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 231 } else { 232 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 233 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 234 } 235 break; 236 case CEED_EVAL_GRAD: 237 if (is_at_points) { 238 // AtPoints 239 if (!basis_data->d_chebyshev_interp_1d) { 240 CeedSize interp_bytes; 241 CeedScalar *chebyshev_interp_1d; 242 243 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 244 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 245 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 246 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 247 CeedCallHip(CeedBasisReturnCeed(basis), 248 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 249 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 250 } 251 if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d; 252 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 253 } else { 254 // Standard quadrature 255 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 256 else data->B.outputs[i] = basis_data->d_interp_1d; 257 } 258 if (is_tensor) { 259 if (use_previous_field) { 260 std::string reuse_var = "s_B" + ((field_reuse.is_input ? 
"_in_" : "_out_") + std::to_string(field_reuse.index)); 261 262 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 263 } else { 264 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 265 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 266 } 267 } 268 if (is_at_points) break; // No G mat for AtPoints 269 if (use_3d_slices) { 270 if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d; 271 else data->G.outputs[i] = basis_data->d_collo_grad_1d; 272 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 273 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 274 275 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 276 } else { 277 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 278 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 279 } 280 } else { 281 bool has_collo_grad = basis_data->d_collo_grad_1d; 282 283 if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 284 else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 285 if (has_collo_grad) { 286 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 287 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 288 289 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 290 } else { 291 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 292 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." 
<< option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 293 } 294 } else { 295 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 296 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 297 298 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 299 } else { 300 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim") << "];\n"; 301 code << " LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << ">(data, G." << option_name << "[" << i << "], s_G" 302 << var_suffix << ");\n"; 303 } 304 } 305 } 306 break; 307 case CEED_EVAL_WEIGHT: 308 break; // No action 309 // LCOV_EXCL_START 310 case CEED_EVAL_DIV: 311 case CEED_EVAL_CURL: 312 break; // TODO: Not implemented 313 // LCOV_EXCL_STOP 314 } 315 CeedCallBackend(CeedBasisDestroy(&basis)); 316 return CEED_ERROR_SUCCESS; 317 } 318 319 //------------------------------------------------------------------------------ 320 // Restriction 321 //------------------------------------------------------------------------------ 322 static int CeedOperatorBuildKernelRestriction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 323 CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field, 324 CeedInt Q_1d, bool is_input, bool is_tensor, bool is_at_points, bool use_3d_slices) { 325 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 326 std::string P_name = (is_tensor ? 
"P_1d" : "P") + var_suffix; 327 CeedEvalMode eval_mode = CEED_EVAL_NONE; 328 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 329 CeedSize l_size; 330 CeedRestrictionType rstr_type = CEED_RESTRICTION_STANDARD; 331 CeedElemRestriction_Hip *rstr_data; 332 CeedElemRestriction elem_rstr; 333 CeedBasis basis; 334 335 // Get field data 336 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 337 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 338 CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type)); 339 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 340 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 341 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 342 } 343 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 344 if (basis != CEED_BASIS_NONE) { 345 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 346 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 347 } 348 CeedCallBackend(CeedBasisDestroy(&basis)); 349 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 350 351 // Restriction 352 if (is_input) { 353 // Input 354 if (field_input_buffer[i] != i) { 355 std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]); 356 357 // Restriction was already done for previous input 358 code << " CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n"; 359 } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) { 360 if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) { 361 // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated 362 code << " CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n"; 363 } else if (rstr_type != CEED_RESTRICTION_POINTS) { 364 // Otherwise we're using the scratch space 365 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 366 } 
367 switch (rstr_type) { 368 case CEED_RESTRICTION_STANDARD: { 369 CeedInt comp_stride; 370 371 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 372 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 373 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 374 code << " // CompStride: " << comp_stride << "\n"; 375 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 376 code << " ReadLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 377 << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix << ");\n"; 378 break; 379 } 380 case CEED_RESTRICTION_STRIDED: { 381 bool has_backend_strides; 382 CeedInt num_elem; 383 384 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 385 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 386 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 387 388 if (!has_backend_strides) { 389 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 390 } 391 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 392 code << " ReadLVecStrided" << (is_tensor ? 
dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 393 << strides[1] << ", " << strides[2] << ">(data, elem, d" << var_suffix << ", r_e" << var_suffix << ");\n"; 394 break; 395 } 396 case CEED_RESTRICTION_POINTS: { 397 CeedInt comp_stride; 398 399 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 400 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 401 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 402 break; 403 } 404 // LCOV_EXCL_START 405 case CEED_RESTRICTION_ORIENTED: 406 case CEED_RESTRICTION_CURL_ORIENTED: 407 break; // TODO: Not implemented 408 // LCOV_EXCL_STOP 409 } 410 } 411 } else { 412 // Output 413 switch (rstr_type) { 414 case CEED_RESTRICTION_STANDARD: { 415 CeedInt comp_stride; 416 417 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 418 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 419 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 420 code << " // CompStride: " << comp_stride << "\n"; 421 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 422 code << " WriteLVecStandard" << (is_tensor ? 
dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 423 << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix << ");\n"; 424 break; 425 } 426 case CEED_RESTRICTION_STRIDED: { 427 bool has_backend_strides; 428 CeedInt num_elem; 429 430 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 431 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 432 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 433 434 if (!has_backend_strides) { 435 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 436 } 437 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 438 code << " WriteLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 439 << strides[1] << ", " << strides[2] << ">(data, elem, r_e" << var_suffix << ", d" << var_suffix << ");\n"; 440 break; 441 } 442 case CEED_RESTRICTION_POINTS: 443 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 444 break; 445 // LCOV_EXCL_START 446 case CEED_RESTRICTION_ORIENTED: 447 case CEED_RESTRICTION_CURL_ORIENTED: 448 break; // TODO: Not implemented 449 // LCOV_EXCL_STOP 450 } 451 } 452 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 453 return CEED_ERROR_SUCCESS; 454 } 455 456 //------------------------------------------------------------------------------ 457 // Basis 458 //------------------------------------------------------------------------------ 459 static int CeedOperatorBuildKernelBasis_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 460 CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt Q_1d, bool is_input, bool is_tensor, 461 bool is_at_points, bool use_3d_slices) { 462 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 463 std::string P_name = (is_tensor ? 
"P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 464 CeedEvalMode eval_mode = CEED_EVAL_NONE; 465 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 466 CeedElemRestriction elem_rstr; 467 CeedBasis basis; 468 469 // Get field data 470 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 471 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 472 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 473 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 474 } 475 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 476 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 477 if (basis != CEED_BASIS_NONE) { 478 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 479 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 480 } 481 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 482 483 // Basis 484 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 485 if (is_input) { 486 switch (eval_mode) { 487 case CEED_EVAL_NONE: 488 if (!use_3d_slices && !is_at_points) { 489 code << " CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n"; 490 } 491 break; 492 case CEED_EVAL_INTERP: 493 if (is_at_points) { 494 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 495 496 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 497 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 498 << var_suffix << ", r_c" << var_suffix << ");\n"; 499 } else { 500 std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d") : "InterpNonTensor"; 501 502 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? 
Q_name : "1") << "];\n"; 503 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 504 << var_suffix << ", r_q" << var_suffix << ");\n"; 505 } 506 break; 507 case CEED_EVAL_GRAD: 508 if (is_at_points) { 509 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 510 511 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 512 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 513 << var_suffix << ", r_c" << var_suffix << ");\n"; 514 } else if (use_3d_slices) { 515 std::string function_name = (dim > 1 ? "InterpTensor" : "Interp") + std::to_string(dim) + "d"; 516 517 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 518 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 519 << var_suffix << ", r_q" << var_suffix << ");\n"; 520 } else if (is_tensor) { 521 bool is_collocated = dim == 3 && Q_1d >= P_1d; 522 std::string function_name = (dim == 1 ? "Grad" : (is_collocated ? "GradTensorCollocated" : "GradTensor")) + std::to_string(dim) + "d"; 523 524 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (dim >= 3 ? 
Q_name : "1") << "];\n"; 525 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix << ", s_B" 526 << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 527 } else { 528 std::string function_name = "GradNonTensor"; 529 530 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 531 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ">(data, r_e" << var_suffix 532 << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 533 } 534 break; 535 case CEED_EVAL_WEIGHT: { 536 if (is_at_points) { 537 code << " // Nothing to do AtPoints\n"; 538 } else { 539 CeedBasis_Hip_shared *basis_data; 540 std::string function_name = is_tensor ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d") : "WeightNonTensor"; 541 542 code << " CeedScalar r_q" << var_suffix << "[" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 543 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 544 data->W = basis_data->d_q_weight_1d; 545 code << " " << function_name << "<" << Q_name << ">(data, W, r_q" << var_suffix << ");\n"; 546 } 547 break; 548 } 549 // LCOV_EXCL_START 550 case CEED_EVAL_DIV: 551 case CEED_EVAL_CURL: 552 break; // TODO: Not implemented 553 // LCOV_EXCL_STOP 554 } 555 } else { 556 switch (eval_mode) { 557 case CEED_EVAL_NONE: 558 code << " CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n"; 559 break; // No action 560 case CEED_EVAL_INTERP: 561 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 562 if (is_at_points) { 563 std::string function_name = (dim == 1 ? 
"InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 564 565 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_c" << var_suffix << ", s_B" 566 << var_suffix << ", r_e" << var_suffix << ");\n"; 567 } else { 568 std::string function_name = 569 is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d") : "InterpTransposeNonTensor"; 570 571 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 572 << var_suffix << ", r_e" << var_suffix << ");\n"; 573 } 574 break; 575 case CEED_EVAL_GRAD: 576 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 577 if (is_at_points) { 578 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 579 580 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_c" << var_suffix << ", s_B" 581 << var_suffix << ", r_e" << var_suffix << ");\n"; 582 } else if (use_3d_slices) { 583 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 584 585 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 586 << var_suffix << ", r_e" << var_suffix << ");\n"; 587 } else if (is_tensor) { 588 bool is_collocated = dim == 3 && Q_1d >= P_1d; 589 std::string function_name = 590 (dim == 1 ? "GradTranspose" : (is_collocated ? 
"GradTransposeTensorCollocated" : "GradTransposeTensor")) + std::to_string(dim) + "d"; 591 592 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix << ", s_B" 593 << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 594 } else { 595 std::string function_name = "GradTransposeNonTensor"; 596 597 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ">(data, r_q" << var_suffix 598 << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 599 } 600 break; 601 // LCOV_EXCL_START 602 case CEED_EVAL_WEIGHT: 603 break; // Should not occur 604 case CEED_EVAL_DIV: 605 case CEED_EVAL_CURL: 606 break; // TODO: Not implemented 607 // LCOV_EXCL_STOP 608 } 609 } 610 CeedCallBackend(CeedBasisDestroy(&basis)); 611 return CEED_ERROR_SUCCESS; 612 } 613 614 //------------------------------------------------------------------------------ 615 // QFunction 616 //------------------------------------------------------------------------------ 617 static int CeedOperatorBuildKernelQFunction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt dim, CeedInt max_num_points, 618 CeedInt num_input_fields, CeedOperatorField *op_input_fields, CeedQFunctionField *qf_input_fields, 619 CeedInt num_output_fields, CeedOperatorField *op_output_fields, 620 CeedQFunctionField *qf_output_fields, std::string qfunction_name, CeedInt Q_1d, bool is_tensor, 621 bool is_at_points, bool use_3d_slices) { 622 std::string Q_name = is_tensor ? 
"Q_1d" : "Q"; 623 CeedEvalMode eval_mode = CEED_EVAL_NONE; 624 CeedElemRestriction elem_rstr; 625 626 // Setup output arrays 627 code << "\n // -- Output field setup\n"; 628 for (CeedInt i = 0; i < num_output_fields; i++) { 629 std::string var_suffix = "_out_" + std::to_string(i); 630 631 code << " // ---- Output field " << i << "\n"; 632 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 633 switch (eval_mode) { 634 case CEED_EVAL_NONE: 635 if (is_at_points) { 636 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n"; 637 } else { 638 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 639 } 640 break; 641 case CEED_EVAL_INTERP: 642 if (is_at_points) { 643 // Accumulator for point data 644 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 645 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n"; 646 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 647 code << " }\n"; 648 } else { 649 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 650 } 651 break; 652 case CEED_EVAL_GRAD: 653 if (is_at_points) { 654 // Accumulator for point data 655 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "*dim];\n"; 656 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? 
Q_name : "1") << "; i++) {\n"; 657 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 658 code << " }\n"; 659 } else if (use_3d_slices) { 660 // Accumulator for gradient slices 661 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 662 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) {\n"; 663 code << " r_q" << var_suffix << "[i] = 0.0;\n"; 664 code << " }\n"; 665 } else { 666 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 667 } 668 break; 669 case CEED_EVAL_WEIGHT: 670 break; 671 // LCOV_EXCL_START 672 case CEED_EVAL_DIV: 673 case CEED_EVAL_CURL: 674 break; // TODO: Not implemented 675 // LCOV_EXCL_STOP 676 } 677 } 678 679 if (is_at_points) { 680 // We need to handle batches of points 681 code << "\n // Note: Using batches of points\n"; 682 code << " const CeedInt point_loop_bound = (blockDim.x * blockDim.y) * ceil(1.0 * max_num_points / (blockDim.x * blockDim.y));\n\n"; 683 code << " #pragma unroll\n"; 684 code << " for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < point_loop_bound; i += blockDim.x * blockDim.y) {\n"; 685 code << " const CeedInt p = i % max_num_points;\n\n"; 686 687 code << " // -- Coordinates\n"; 688 code << " CeedScalar r_x[dim];\n"; 689 code << " ReadPoint<dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n"; 690 691 code << " // -- Input fields\n"; 692 for (CeedInt i = 0; i < num_input_fields; i++) { 693 std::string var_suffix = "_in_" + std::to_string(i); 694 695 code << " // ---- Input field " << i << "\n"; 696 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 697 // Basis action 698 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 699 switch (eval_mode) { 700 case CEED_EVAL_NONE: 701 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 
702 code << " ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 703 << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 704 break; 705 case CEED_EVAL_INTERP: 706 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 707 code << " InterpAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_c" << var_suffix 708 << ", r_x, r_s" << var_suffix << ");\n"; 709 break; 710 case CEED_EVAL_GRAD: 711 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 712 code << " GradAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_c" << var_suffix 713 << ", r_x, r_s" << var_suffix << ");\n"; 714 break; 715 case CEED_EVAL_WEIGHT: 716 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 717 code << " r_s" << var_suffix << "[0] = 1.0;\n"; 718 break; 719 // LCOV_EXCL_START 720 case CEED_EVAL_DIV: 721 case CEED_EVAL_CURL: 722 break; // TODO: Not implemented 723 // LCOV_EXCL_STOP 724 } 725 } 726 code << "\n // -- Output fields\n"; 727 for (CeedInt i = 0; i < num_output_fields; i++) { 728 std::string var_suffix = "_out_" + std::to_string(i); 729 730 code << " // ---- Output field " << i << "\n"; 731 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 732 // Basis action 733 switch (eval_mode) { 734 case CEED_EVAL_NONE: 735 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 736 break; 737 case CEED_EVAL_INTERP: 738 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 739 break; 740 case CEED_EVAL_GRAD: 741 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 742 break; 743 // LCOV_EXCL_START 744 case CEED_EVAL_WEIGHT: 745 break; // Should not occur 746 case CEED_EVAL_DIV: 747 case CEED_EVAL_CURL: 748 break; // TODO: Not implemented 749 // 
LCOV_EXCL_STOP 750 } 751 } 752 753 } else if (use_3d_slices) { 754 // We treat quadrature points per slice in 3d to save registers 755 code << "\n // Note: Using planes of 3D elements\n"; 756 code << " #pragma unroll\n"; 757 code << " for (CeedInt q = 0; q < " << Q_name << "; q++) {\n"; 758 code << " // -- Input fields\n"; 759 for (CeedInt i = 0; i < num_input_fields; i++) { 760 std::string var_suffix = "_in_" + std::to_string(i); 761 762 code << " // ---- Input field " << i << "\n"; 763 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 764 // Basis action 765 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 766 switch (eval_mode) { 767 case CEED_EVAL_NONE: 768 bool is_strided; 769 770 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 771 772 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 773 CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided)); 774 if (is_strided) { 775 bool has_backend_strides; 776 CeedInt num_elem, elem_size; 777 778 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 779 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 780 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 781 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 782 783 if (!has_backend_strides) { 784 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 785 } 786 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 787 code << " ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", " << strides[0] << ", " << strides[1] << ", " 788 << strides[2] << ">(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n"; 789 } else { 790 CeedSize l_size = 0; 791 CeedInt comp_stride; 792 CeedElemRestriction_Hip *rstr_data; 793 794 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, 
&l_size)); 795 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 796 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 797 code << " // CompStride: " << comp_stride << "\n"; 798 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 799 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 800 code << " ReadEVecSliceStandard3d<num_comp" << var_suffix << ", " << comp_stride << ", " << Q_name << ">(data, l_size" << var_suffix 801 << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 802 } 803 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 804 break; 805 case CEED_EVAL_INTERP: 806 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 807 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n"; 808 code << " r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n"; 809 code << " }\n"; 810 break; 811 case CEED_EVAL_GRAD: 812 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 813 code << " GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ">(data, q, r_q" << var_suffix << ", s_G" << var_suffix 814 << ", r_s" << var_suffix << ");\n"; 815 break; 816 case CEED_EVAL_WEIGHT: 817 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 818 code << " r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n"; 819 break; 820 // LCOV_EXCL_START 821 case CEED_EVAL_DIV: 822 case CEED_EVAL_CURL: 823 break; // TODO: Not implemented 824 // LCOV_EXCL_STOP 825 } 826 } 827 code << "\n // -- Output fields\n"; 828 for (CeedInt i = 0; i < num_output_fields; i++) { 829 std::string var_suffix = "_out_" + std::to_string(i); 830 831 code << " // ---- Output field " << i << "\n"; 832 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 833 // Basis action 834 switch (eval_mode) { 835 case CEED_EVAL_NONE: 836 code << " CeedScalar r_s" << 
var_suffix << "[num_comp" << var_suffix << "];\n"; 837 break; 838 case CEED_EVAL_INTERP: 839 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 840 break; 841 case CEED_EVAL_GRAD: 842 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 843 break; 844 // LCOV_EXCL_START 845 case CEED_EVAL_WEIGHT: 846 break; // Should not occur 847 case CEED_EVAL_DIV: 848 case CEED_EVAL_CURL: 849 break; // TODO: Not implemented 850 // LCOV_EXCL_STOP 851 } 852 } 853 } else { 854 code << "\n // Note: Using full elements\n"; 855 code << " {\n"; 856 code << " // -- Input fields\n"; 857 for (CeedInt i = 0; i < num_input_fields; i++) { 858 code << " // ---- Input field " << i << "\n"; 859 code << " CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n"; 860 } 861 code << " // -- Output fields\n"; 862 for (CeedInt i = 0; i < num_output_fields; i++) { 863 code << " // ---- Output field " << i << "\n"; 864 code << " CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n"; 865 } 866 } 867 868 // Input and output buffers 869 code << "\n // -- QFunction inputs and outputs\n"; 870 code << " // ---- Inputs\n"; 871 code << " CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n"; 872 for (CeedInt i = 0; i < num_input_fields; i++) { 873 code << " // ------ Input field " << i << "\n"; 874 code << " inputs[" << i << "] = r_s_in_" << i << ";\n"; 875 } 876 code << " // ---- Outputs\n"; 877 code << " CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n"; 878 for (CeedInt i = 0; i < num_output_fields; i++) { 879 code << " // ------ Output field " << i << "\n"; 880 code << " outputs[" << i << "] = r_s_out_" << i << ";\n"; 881 } 882 883 // Apply QFunction 884 code << "\n // -- Apply QFunction\n"; 885 code << " " << qfunction_name << "(ctx, "; 886 if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) { 887 code << "1"; 888 } else { 889 code << Q_name; 890 } 891 code << ", inputs, outputs);\n"; 892 893 if 
(is_at_points) { 894 // Map back to coefficients 895 code << "\n // -- Output fields\n"; 896 for (CeedInt i = 0; i < num_output_fields; i++) { 897 std::string var_suffix = "_out_" + std::to_string(i); 898 std::string P_name = "P_1d" + var_suffix; 899 900 code << " // ---- Output field " << i << "\n"; 901 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 902 // Basis action 903 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 904 switch (eval_mode) { 905 case CEED_EVAL_NONE: { 906 CeedInt comp_stride; 907 CeedElemRestriction elem_rstr; 908 909 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 910 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 911 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 912 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 913 code << " WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 914 << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]" 915 << ", r_s" << var_suffix << ", d" << var_suffix << ");\n"; 916 break; 917 } 918 case CEED_EVAL_INTERP: 919 code << " if (i >= points.num_per_elem[elem]) {\n"; 920 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 921 code << " }\n"; 922 code << " InterpTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_s" 923 << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 924 break; 925 case CEED_EVAL_GRAD: 926 code << " if (i >= points.num_per_elem[elem]) {\n"; 927 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 928 code << " }\n"; 929 code << " GradTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << Q_name << ">(data, i, r_s" 930 << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 931 break; 
        // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
      }
    }
  } else if (use_3d_slices) {
    // Copy or apply transpose grad, if needed
    // Map each output field's per-slice QFunction result r_s_out_i back into the
    // per-element quadrature-space accumulator r_q_out_i for the current plane q
    code << "\n      // -- Output fields\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      std::string var_suffix = "_out_" + std::to_string(i);
      std::string P_name     = "P_1d" + var_suffix;

      code << "      // ---- Output field " << i << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // Straight copy of the slice values into the accumulator
          code << "      for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << "        r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_INTERP:
          // Collocated interpolation output — also a straight copy
          code << "      for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << "        r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          // Transpose of the collocated gradient, applied one 3D slice at a time
          code << "      GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ">(data, q, r_s" << var_suffix << ", s_G"
               << var_suffix << ", r_q" << var_suffix << ");\n";
          break;
        // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
      }
    }
  }
  // Close the generated point/quadrature loop (or plain scope) opened before the QFunction call
  code << "  }\n";
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Build single operator kernel
//
// Generates the source for a single fused device kernel implementing this
// operator, JIT-compiles it, and loads the kernel handle into data->op.
//
// op:            operator to build
// is_good_build: set true on a successful compile; set false when the operator
//                configuration is unsupported (mixed tensor/non-tensor bases,
//                bases not from /gpu/hip/shared) or the JIT compile fails, so
//                the caller can fall back to the reference implementation
//------------------------------------------------------------------------------
extern "C" int CeedOperatorBuildKernel_Hip_gen(CeedOperator op, bool *is_good_build) {
  bool                   is_tensor = true, is_at_points = false, use_3d_slices = false;
  Ceed                   ceed;
  CeedInt                Q_1d, num_input_fields, num_output_fields, dim = 1, max_num_points = 0, coords_comp_stride = 0;
  CeedQFunctionField    *qf_input_fields, *qf_output_fields;
  CeedQFunction_Hip_gen *qf_data;
  CeedQFunction          qf;
  CeedOperatorField     *op_input_fields, *op_output_fields;
  CeedOperator_Hip_gen  *data;
  std::ostringstream     code;

  CeedCallBackend(CeedOperatorGetData(op, &data));
  // Early exit: if setup already ran, just report whether that build succeeded
  {
    bool is_setup_done;

    CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
    if (is_setup_done) {
      *is_good_build = !data->use_fallback;
      return CEED_ERROR_SUCCESS;
    }
  }

  // Check field compatibility
  // All active bases must come from the /gpu/hip/shared backend and must be
  // uniformly tensor or uniformly non-tensor for this fused kernel to apply
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  {
    bool has_shared_bases = true, is_all_tensor = true, is_all_nontensor = true;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool        is_tensor = true;  // shadows outer is_tensor intentionally; per-basis flag
        const char *resource;
        char       *resource_root;
        Ceed        basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor    = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;
        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }

    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool        is_tensor = true;  // shadows outer is_tensor intentionally; per-basis flag
        const char *resource;
        char       *resource_root;
        Ceed        basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor    = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;

        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }
    // -- Fallback to ref if not all bases are shared
    if (!has_shared_bases || (!is_all_tensor && !is_all_nontensor)) {
      *is_good_build = false;
      return CEED_ERROR_SUCCESS;
    }
  }
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Get operator data
  CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
  CeedCallBackend(CeedOperatorBuildKernelData_Hip_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, op_output_fields,
                                                      qf_output_fields, &data->max_P_1d, &Q_1d, &dim, &is_tensor, &use_3d_slices));
  if (dim == 0) dim = 1;
  data->dim = dim;
  if (is_at_points) {
    CeedElemRestriction_Hip *rstr_data;
    CeedElemRestriction      rstr_points = NULL;

    CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
    CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
    CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data));
    // Borrow the device offsets array from the points restriction (not owned
    // here) — NOTE(review): assumes the restriction's device data outlives the
    // operator; confirm against backend ownership conventions
    data->points.indices = (CeedInt *)rstr_data->d_offsets;
    CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
  }
  // AtPoints operators always use the point-batch path, never 3D collocated slices
  if (is_at_points) use_3d_slices = false;
  if (Q_1d == 0) {
    // No basis provided a quadrature size; fall back to point count or the
    // operator's own quadrature point count
    if (is_at_points) Q_1d = max_num_points;
    else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d));
  }
  data->Q_1d = Q_1d;

  // Check for restriction only identity operator
  // A NONE -> NONE identity operator has no basis work at all and is unsupported here
  {
    bool is_identity_qf;

    CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
    if (is_identity_qf) {
      CeedEvalMode eval_mode_in, eval_mode_out;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
      CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
                "Backend does not implement restriction only identity operators");
    }
  }

  // Load basis source files
  if (is_tensor) {
    code << "// Tensor basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-templates.h>\n\n";
  } else {
    code << "// Non-tensor basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-nontensor-templates.h>\n\n";
  }
  if (is_at_points) {
    code << "// AtPoints basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-at-points-templates.h>\n\n";
  }
  code << "// CodeGen operator source\n";
  code << "#include <ceed/jit-source/hip/hip-gen-templates.h>\n\n";

  // Get QFunction name
  std::string qfunction_name(qf_data->qfunction_name);
  std::string operator_name;

  operator_name = "CeedKernelHipGenOperator_" + qfunction_name;

  // Define CEED_Q_VLA
  // The QFunction is called per-point (Q = 1) except for the fused 3D tensor
  // path, where it processes a full column of Q_1d quadrature points at once
  code << "\n#undef CEED_Q_VLA\n";
  if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) {
    code << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  // Add user QFunction source
  {
    const char *source_path;

    CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
    CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/hip/gen backend requires QFunction source code file");

    code << "// User QFunction source\n";
    code << "#include \"" << source_path << "\"\n\n";
  }

  // Setup
  // Emit the kernel signature and a legend for the generated register naming scheme
  code << "\n// -----------------------------------------------------------------------------\n";
  code << "// Operator Kernel\n";
  code << "// \n";
  code << "// d_[in,out]_i:   CeedVector device array\n";
  code << "// r_[in,out]_e_i: Element vector register\n";
  code << "// r_[in,out]_q_i: Quadrature space vector register\n";
  code << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
  code << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
  code << "// \n";
  code << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
  code << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
  code << "// -----------------------------------------------------------------------------\n";
  code << "\nextern \"C\" __launch_bounds__(BLOCK_SIZE)\n";
  code << "__global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Hip indices, Fields_Hip fields, Fields_Hip B, Fields_Hip G, CeedScalar* W, Points_Hip points) {\n";

  // Scratch buffers
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << "  const CeedScalar *d_in_" << i << " = fields.inputs[" << i << "];\n";
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "  CeedScalar *d_out_" << i << " = fields.outputs[" << i << "];\n";
  }

  code << "  const CeedInt dim = " << dim << ";\n";
  code << "  const CeedInt " << (is_tensor ? "Q_1d" : "Q") << " = " << Q_1d << ";\n";
  if (is_at_points) {
    code << "  const CeedInt max_num_points = " << max_num_points << ";\n";
    code << "  const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
  }

  // Shared data
  // Each thread gets a per-z-slice view into the dynamic shared memory buffer
  code << "  extern __shared__ CeedScalar slice[];\n";
  code << "  SharedData_Hip data;\n";
  code << "  data.t_id_x = threadIdx.x;\n";
  code << "  data.t_id_y = threadIdx.y;\n";
  code << "  data.t_id_z = threadIdx.z;\n";
  code << "  data.t_id   = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
  code << "  data.slice  = slice + data.t_id_z*T_1D" << ((!is_tensor || dim == 1) ? "" : "*T_1D") << ";\n";

  // -- Determine input mat reuse
  // For each input field, find an earlier input field whose B/G matrices can be
  // shared in shared memory (same basis; for non-tensor also same eval mode)
  FieldReuse_Hip input_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    input_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis    basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
    if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
    for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          input_matrix_reuse[i].index     = j;
          input_matrix_reuse[i].is_input  = true;
          input_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            input_matrix_reuse[i].index     = j;
            input_matrix_reuse[i].is_input  = true;
            input_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // -- Determine output mat reuse
  // Outputs may reuse matrices from any input field, or from an earlier output field
  FieldReuse_Hip output_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_output_fields; i++) {
    output_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis    basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index     = j;
          output_matrix_reuse[i].is_input  = true;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index     = j;
            output_matrix_reuse[i].is_input  = true;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis    basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index     = j;
          output_matrix_reuse[i].is_input  = false;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index     = j;
            output_matrix_reuse[i].is_input  = false;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // Initialize constants, and matrices B and G
  code << "\n  // Input field constants and basis data\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], Q_1d, true,
                                                             is_tensor, is_at_points, use_3d_slices));
  }
  code << "\n  // Output field constants and basis data\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], Q_1d,
                                                             false, is_tensor, is_at_points, use_3d_slices));
  }

  // Loop over all elements
  // Grid-stride-style loop over blockDim.z elements per block
  code << "\n  // Element loop\n";
  code << "  __syncthreads();\n";
  code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";

  // -- Compute minimum buffer space needed
  // Size the shared scratch buffer to the largest element E-vector among all fields
  CeedInt max_rstr_buffer_size = 1;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedInt             num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedInt             num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  code << "    // Scratch restriction buffer space\n";
  code << "    CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";

  // -- Determine best input field processing order
  // Group input fields that share both the same restriction and the same vector
  // so the E-vector read can be done once and reused (field_rstr_in_buffer maps
  // each field to the field whose buffer holds its data)
  CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    field_rstr_in_buffer[i] = -1;
    input_field_order[i]    = -1;
  }
  {
    bool    is_ordered[CEED_FIELD_MAX];
    CeedInt curr_index = 0;

    for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedVector          vec_i;
      CeedElemRestriction rstr_i;

      if (is_ordered[i]) continue;
      field_rstr_in_buffer[i] = i;
      is_ordered[i]           = true;
      input_field_order[curr_index] = i;
      curr_index++;
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
      if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
      for (CeedInt j = i + 1; j < num_input_fields; j++) {
        CeedVector          vec_j;
        CeedElemRestriction rstr_j;

        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
        CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
        if (rstr_i == rstr_j && vec_i == vec_j) {
          field_rstr_in_buffer[j] = i;
          is_ordered[j]           = true;
          input_field_order[curr_index] = j;
          curr_index++;
        }
        CeedCallBackend(CeedVectorDestroy(&vec_j));
        CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
      }
      CeedCallBackend(CeedVectorDestroy(&vec_i));
      CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
    }
  }

  // -- Input restriction and basis
  code << "\n    // -- Input field restrictions and basis actions\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedInt f = input_field_order[i];

    code << "    // ---- Input field " << f << "\n";

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, f, dim, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], Q_1d,
                                                               true, is_tensor, is_at_points, use_3d_slices));

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, f, dim, op_input_fields[f], qf_input_fields[f], Q_1d, true, is_tensor,
                                                         is_at_points, use_3d_slices));
  }

  // -- Q function
  CeedCallBackend(CeedOperatorBuildKernelQFunction_Hip_gen(code, data, dim, max_num_points, num_input_fields, op_input_fields, qf_input_fields,
                                                           num_output_fields, op_output_fields, qf_output_fields, qfunction_name, Q_1d, is_tensor,
                                                           is_at_points, use_3d_slices));

  // -- Output basis and restriction
  code << "\n    // -- Output field basis action and restrictions\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "    // ---- Output field " << i << "\n";

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, i, dim, op_output_fields[i], qf_output_fields[i], Q_1d, false, is_tensor,
                                                         is_at_points, use_3d_slices));

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, i, dim, NULL, op_output_fields[i], qf_output_fields[i], Q_1d, false,
                                                               is_tensor, is_at_points, use_3d_slices));
  }

  // Close loop and function
  code << "  }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  CeedInt block_sizes[3] = {0, 0, 0};
  CeedInt num_elem;

  // Compile
  // T_1D and BLOCK_SIZE are injected as compile-time defines; a compile failure
  // is not fatal — it flags the operator for the reference fallback path
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(BlockGridCalculate_Hip_gen(is_tensor ? dim : 1, num_elem, data->max_P_1d, Q_1d, block_sizes));
  {
    bool is_compile_good = false;

    CeedCallBackend(CeedTryCompile_Hip(ceed, code.str().c_str(), &is_compile_good, &data->module, 2, "T_1D", block_sizes[0], "BLOCK_SIZE",
                                       block_sizes[0] * block_sizes[1] * block_sizes[2]));
    if (is_compile_good) {
      *is_good_build = true;
      CeedCallBackend(CeedGetKernel_Hip(ceed, data->module, operator_name.c_str(), &data->op));
    } else {
      *is_good_build = false;
      data->use_fallback = true;
    }
  }
  // Mark setup complete either way so subsequent calls take the early exit above
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  CeedCallBackend(CeedDestroy(&ceed));
  CeedCallBackend(CeedQFunctionDestroy(&qf));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------