1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors. 2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. 3 // 4 // SPDX-License-Identifier: BSD-2-Clause 5 // 6 // This file is part of CEED: http://github.com/ceed 7 8 #define CEED_DEBUG_COLOR 12 9 10 #include <ceed.h> 11 #include <ceed/backend.h> 12 #include <ceed/jit-tools.h> 13 14 #include <iostream> 15 #include <sstream> 16 #include <string> 17 18 #include "../hip-ref/ceed-hip-ref.h" 19 #include "../hip-shared/ceed-hip-shared.h" 20 #include "../hip/ceed-hip-common.h" 21 #include "../hip/ceed-hip-compile.h" 22 #include "ceed-hip-gen.h" 23 24 struct FieldReuse_Hip { 25 CeedInt index; 26 bool is_input; 27 CeedEvalMode eval_mode; 28 }; 29 30 //------------------------------------------------------------------------------ 31 // Calculate the block size used for launching the operator kernel 32 //------------------------------------------------------------------------------ 33 extern "C" int BlockGridCalculate_Hip_gen(const CeedInt dim, const CeedInt num_elem, const CeedInt P_1d, const CeedInt Q_1d, CeedInt *block_sizes) { 34 const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d); 35 if (dim == 1) { 36 CeedInt elems_per_block = 64 * thread_1d > 256 ? 256 / thread_1d : 64; 37 38 elems_per_block = elems_per_block > 0 ? elems_per_block : 1; 39 block_sizes[0] = thread_1d; 40 block_sizes[1] = 1; 41 block_sizes[2] = elems_per_block; 42 } else if (dim == 2) { 43 const CeedInt elems_per_block = thread_1d < 4 ? 16 : 2; 44 45 block_sizes[0] = thread_1d; 46 block_sizes[1] = thread_1d; 47 block_sizes[2] = elems_per_block; 48 } else if (dim == 3) { 49 const CeedInt elems_per_block = thread_1d < 6 ? 4 : (thread_1d < 8 ? 
2 : 1); 50 51 block_sizes[0] = thread_1d; 52 block_sizes[1] = thread_1d; 53 block_sizes[2] = elems_per_block; 54 } 55 return CEED_ERROR_SUCCESS; 56 } 57 58 //------------------------------------------------------------------------------ 59 // Determine type of operator 60 //------------------------------------------------------------------------------ 61 static int CeedOperatorBuildKernelData_Hip_gen(Ceed ceed, CeedInt num_input_fields, CeedOperatorField *op_input_fields, 62 CeedQFunctionField *qf_input_fields, CeedInt num_output_fields, CeedOperatorField *op_output_fields, 63 CeedQFunctionField *qf_output_fields, CeedInt *max_P_1d, CeedInt *Q_1d, CeedInt *dim, bool *is_tensor, 64 bool *use_3d_slices) { 65 // Find dim, P_1d, Q_1d 66 *max_P_1d = 0; 67 *Q_1d = 0; 68 *dim = 0; 69 *is_tensor = true; 70 71 for (CeedInt i = 0; i < num_input_fields; i++) { 72 CeedBasis basis; 73 74 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 75 if (basis != CEED_BASIS_NONE) { 76 bool is_field_tensor; 77 CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0; 78 79 // Collect dim, P_1d, and Q_1d 80 CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor)); 81 *is_tensor = *is_tensor && is_field_tensor; 82 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d)); 83 else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d)); 84 *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d); 85 CeedCallBackend(CeedBasisGetDimension(basis, &field_dim)); 86 CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 87 *dim = field_dim; 88 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d)); 89 else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d)); 90 CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 91 *Q_1d = field_Q_1d; 92 } 93 CeedCallBackend(CeedBasisDestroy(&basis)); 94 } 95 for 
(CeedInt i = 0; i < num_output_fields; i++) { 96 CeedBasis basis; 97 98 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 99 if (basis != CEED_BASIS_NONE) { 100 bool is_field_tensor; 101 CeedInt field_P_1d = 0, field_Q_1d = 0, field_dim = 0; 102 103 // Collect dim, P_1d, and Q_1d 104 CeedCallBackend(CeedBasisIsTensor(basis, &is_field_tensor)); 105 *is_tensor = *is_tensor && is_field_tensor; 106 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &field_P_1d)); 107 else CeedCallBackend(CeedBasisGetNumNodes(basis, &field_P_1d)); 108 *max_P_1d = CeedIntMax(*max_P_1d, field_P_1d); 109 CeedCallBackend(CeedBasisGetDimension(basis, &field_dim)); 110 CeedCheck(*dim == 0 || field_dim == *dim, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 111 *dim = field_dim; 112 if (is_field_tensor) CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &field_Q_1d)); 113 else CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &field_Q_1d)); 114 CeedCheck(*Q_1d == 0 || field_Q_1d == *Q_1d, ceed, CEED_ERROR_BACKEND, "Quadrature spaces must be compatible"); 115 *Q_1d = field_Q_1d; 116 } 117 CeedCallBackend(CeedBasisDestroy(&basis)); 118 } 119 120 // Only use 3D collocated gradient parallelization strategy when gradient is computed 121 *use_3d_slices = false; 122 if (*dim == 3) { 123 bool was_grad_found = false; 124 125 for (CeedInt i = 0; i < num_input_fields; i++) { 126 CeedEvalMode eval_mode; 127 128 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 129 if (eval_mode == CEED_EVAL_GRAD) { 130 CeedBasis_Hip_shared *basis_data; 131 CeedBasis basis; 132 133 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 134 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 135 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? 
*use_3d_slices : true); 136 was_grad_found = true; 137 CeedCallBackend(CeedBasisDestroy(&basis)); 138 } 139 } 140 for (CeedInt i = 0; i < num_output_fields; i++) { 141 CeedEvalMode eval_mode; 142 143 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 144 if (eval_mode == CEED_EVAL_GRAD) { 145 CeedBasis_Hip_shared *basis_data; 146 CeedBasis basis; 147 148 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 149 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 150 *use_3d_slices = basis_data->d_collo_grad_1d && (was_grad_found ? *use_3d_slices : true); 151 was_grad_found = true; 152 CeedCallBackend(CeedBasisDestroy(&basis)); 153 } 154 } 155 } 156 return CEED_ERROR_SUCCESS; 157 } 158 159 //------------------------------------------------------------------------------ 160 // Setup fields 161 //------------------------------------------------------------------------------ 162 static int CeedOperatorBuildKernelFieldData_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedOperatorField op_field, 163 CeedQFunctionField qf_field, FieldReuse_Hip field_reuse, CeedInt Q_1d, bool is_input, 164 bool is_tensor, bool is_at_points, bool use_3d_slices) { 165 const char *field_name; 166 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 167 std::string P_name = (is_tensor ? "P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 168 std::string option_name = (is_input ? "inputs" : "outputs"); 169 CeedEvalMode eval_mode = CEED_EVAL_NONE; 170 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 171 CeedElemRestriction elem_rstr; 172 CeedBasis_Hip_shared *basis_data; 173 CeedBasis basis; 174 175 // Field reuse info 176 bool use_previous_field = field_reuse.index != -1; 177 178 CeedCallBackend(CeedOperatorFieldGetName(op_field, &field_name)); 179 code << " // -- " << (is_input ? 
"Input" : "Output") << " field " << i << ": " << field_name << "\n"; 180 181 // Get field data 182 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 183 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 184 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 185 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 186 } 187 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 188 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 189 if (basis != CEED_BASIS_NONE) { 190 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 191 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 192 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 193 } 194 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 195 196 // Set field constants 197 if (eval_mode != CEED_EVAL_WEIGHT) { 198 code << " const CeedInt " << P_name << " = " << (basis == CEED_BASIS_NONE ? Q_1d : P_1d) << ";\n"; 199 code << " const CeedInt num_comp" << var_suffix << " = " << num_comp << ";\n"; 200 } 201 202 // Load basis data 203 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 204 switch (eval_mode) { 205 case CEED_EVAL_NONE: 206 break; 207 case CEED_EVAL_INTERP: 208 if (is_at_points) { 209 // AtPoints 210 if (!basis_data->d_chebyshev_interp_1d) { 211 CeedSize interp_bytes; 212 CeedScalar *chebyshev_interp_1d; 213 214 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 215 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 216 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 217 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 218 CeedCallHip(CeedBasisReturnCeed(basis), 219 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 220 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 221 } 222 if (is_input) data->B.inputs[i] = 
basis_data->d_chebyshev_interp_1d; 223 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 224 } else { 225 // Standard quadrature 226 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 227 else data->B.outputs[i] = basis_data->d_interp_1d; 228 } 229 if (use_previous_field) { 230 std::string reuse_var = "s_B" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 231 232 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 233 } else { 234 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 235 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 236 } 237 break; 238 case CEED_EVAL_GRAD: 239 if (is_at_points) { 240 // AtPoints 241 if (!basis_data->d_chebyshev_interp_1d) { 242 CeedSize interp_bytes; 243 CeedScalar *chebyshev_interp_1d; 244 245 interp_bytes = P_1d * Q_1d * sizeof(CeedScalar); 246 CeedCallBackend(CeedCalloc(P_1d * Q_1d, &chebyshev_interp_1d)); 247 CeedCallBackend(CeedBasisGetChebyshevInterp1D(basis, chebyshev_interp_1d)); 248 CeedCallHip(CeedBasisReturnCeed(basis), hipMalloc((void **)&basis_data->d_chebyshev_interp_1d, interp_bytes)); 249 CeedCallHip(CeedBasisReturnCeed(basis), 250 hipMemcpy(basis_data->d_chebyshev_interp_1d, chebyshev_interp_1d, interp_bytes, hipMemcpyHostToDevice)); 251 CeedCallBackend(CeedFree(&chebyshev_interp_1d)); 252 } 253 if (is_input) data->B.inputs[i] = basis_data->d_chebyshev_interp_1d; 254 else data->B.outputs[i] = basis_data->d_chebyshev_interp_1d; 255 } else { 256 // Standard quadrature 257 if (is_input) data->B.inputs[i] = basis_data->d_interp_1d; 258 else data->B.outputs[i] = basis_data->d_interp_1d; 259 } 260 if (is_tensor) { 261 if (use_previous_field) { 262 std::string reuse_var = "s_B" + ((field_reuse.is_input ? 
"_in_" : "_out_") + std::to_string(field_reuse.index)); 263 264 code << " CeedScalar *s_B" << var_suffix << " = " << reuse_var << ";\n"; 265 } else { 266 code << " __shared__ CeedScalar s_B" << var_suffix << "[" << P_name << "*" << Q_name << "];\n"; 267 code << " LoadMatrix<" << P_name << ", " << Q_name << ">(data, B." << option_name << "[" << i << "], s_B" << var_suffix << ");\n"; 268 } 269 } 270 if (is_at_points) break; // No G mat for AtPoints 271 if (use_3d_slices) { 272 if (is_input) data->G.inputs[i] = basis_data->d_collo_grad_1d; 273 else data->G.outputs[i] = basis_data->d_collo_grad_1d; 274 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 275 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 276 277 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 278 } else { 279 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 280 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." << option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 281 } 282 } else { 283 bool has_collo_grad = basis_data->d_collo_grad_1d; 284 285 if (is_input) data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 286 else data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; 287 if (has_collo_grad) { 288 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 289 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 290 291 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 292 } else { 293 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << Q_name << "*" << Q_name << "];\n"; 294 code << " LoadMatrix<" << Q_name << ", " << Q_name << ">(data, G." 
<< option_name << "[" << i << "], s_G" << var_suffix << ");\n"; 295 } 296 } else { 297 if (use_previous_field && field_reuse.eval_mode == CEED_EVAL_GRAD) { 298 std::string reuse_var = "s_G" + ((field_reuse.is_input ? "_in_" : "_out_") + std::to_string(field_reuse.index)); 299 300 code << " CeedScalar *s_G" << var_suffix << " = " << reuse_var << ";\n"; 301 } else { 302 code << " __shared__ CeedScalar s_G" << var_suffix << "[" << P_name << "*" << Q_name << (is_tensor ? "" : "*dim") << "];\n"; 303 code << " LoadMatrix<" << P_name << ", " << Q_name << (is_tensor ? "" : "*dim") << ">(data, G." << option_name << "[" << i << "], s_G" 304 << var_suffix << ");\n"; 305 } 306 } 307 } 308 break; 309 case CEED_EVAL_WEIGHT: 310 break; // No action 311 // LCOV_EXCL_START 312 case CEED_EVAL_DIV: 313 case CEED_EVAL_CURL: 314 break; // TODO: Not implemented 315 // LCOV_EXCL_STOP 316 } 317 CeedCallBackend(CeedBasisDestroy(&basis)); 318 return CEED_ERROR_SUCCESS; 319 } 320 321 //------------------------------------------------------------------------------ 322 // Restriction 323 //------------------------------------------------------------------------------ 324 static int CeedOperatorBuildKernelRestriction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 325 CeedInt field_input_buffer[], CeedOperatorField op_field, CeedQFunctionField qf_field, 326 CeedInt Q_1d, bool is_input, bool is_tensor, bool is_at_points, bool use_3d_slices) { 327 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 328 std::string P_name = (is_tensor ? 
"P_1d" : "P") + var_suffix; 329 CeedEvalMode eval_mode = CEED_EVAL_NONE; 330 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 331 CeedSize l_size; 332 CeedRestrictionType rstr_type = CEED_RESTRICTION_STANDARD; 333 CeedElemRestriction_Hip *rstr_data; 334 CeedElemRestriction elem_rstr; 335 CeedBasis basis; 336 337 // Get field data 338 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 339 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 340 CeedCallBackend(CeedElemRestrictionGetType(elem_rstr, &rstr_type)); 341 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 342 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 343 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 344 } 345 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 346 if (basis != CEED_BASIS_NONE) { 347 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 348 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 349 } 350 CeedCallBackend(CeedBasisDestroy(&basis)); 351 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 352 353 // Restriction 354 if (is_input) { 355 // Input 356 if (field_input_buffer[i] != i) { 357 std::string buffer_name = "r_e_in_" + std::to_string(field_input_buffer[i]); 358 359 // Restriction was already done for previous input 360 code << " CeedScalar *r_e" << var_suffix << " = " << buffer_name << ";\n"; 361 } else if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_3d_slices && is_at_points)) { 362 if (eval_mode == CEED_EVAL_NONE && rstr_type != CEED_RESTRICTION_POINTS) { 363 // No basis action, so r_e_in_* in also r_q_in_* and needs to be allocated 364 code << " CeedScalar r_e" << var_suffix << "[num_comp" << var_suffix << "*" << P_name << "];\n"; 365 } else if (rstr_type != CEED_RESTRICTION_POINTS) { 366 // Otherwise we're using the scratch space 367 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 368 } 
369 switch (rstr_type) { 370 case CEED_RESTRICTION_STANDARD: { 371 CeedInt comp_stride; 372 373 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 374 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 375 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 376 code << " // CompStride: " << comp_stride << "\n"; 377 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 378 code << " ReadLVecStandard" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 379 << ">(data, l_size" << var_suffix << ", elem, indices.inputs[" << i << "], d" << var_suffix << ", r_e" << var_suffix << ");\n"; 380 break; 381 } 382 case CEED_RESTRICTION_STRIDED: { 383 bool has_backend_strides; 384 CeedInt num_elem; 385 386 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 387 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 388 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 389 390 if (!has_backend_strides) { 391 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 392 } 393 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 394 code << " ReadLVecStrided" << (is_tensor ? 
dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 395 << strides[1] << ", " << strides[2] << ">(data, elem, d" << var_suffix << ", r_e" << var_suffix << ");\n"; 396 break; 397 } 398 case CEED_RESTRICTION_POINTS: { 399 CeedInt comp_stride; 400 401 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 402 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 403 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 404 break; 405 } 406 // LCOV_EXCL_START 407 case CEED_RESTRICTION_ORIENTED: 408 case CEED_RESTRICTION_CURL_ORIENTED: 409 break; // TODO: Not implemented 410 // LCOV_EXCL_STOP 411 } 412 } 413 } else { 414 // Output 415 switch (rstr_type) { 416 case CEED_RESTRICTION_STANDARD: { 417 CeedInt comp_stride; 418 419 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 420 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 421 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 422 code << " // CompStride: " << comp_stride << "\n"; 423 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 424 code << " WriteLVecStandard" << (is_tensor ? 
dim : 1) << "d<num_comp" << var_suffix << ", " << comp_stride << ", " << P_name 425 << ">(data, l_size" << var_suffix << ", elem, indices.outputs[" << i << "], r_e" << var_suffix << ", d" << var_suffix << ");\n"; 426 break; 427 } 428 case CEED_RESTRICTION_STRIDED: { 429 bool has_backend_strides; 430 CeedInt num_elem; 431 432 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 433 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 434 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 435 436 if (!has_backend_strides) { 437 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 438 } 439 code << " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 440 code << " WriteLVecStrided" << (is_tensor ? dim : 1) << "d<num_comp" << var_suffix << ", " << P_name << ", " << strides[0] << ", " 441 << strides[1] << ", " << strides[2] << ">(data, elem, r_e" << var_suffix << ", d" << var_suffix << ");\n"; 442 break; 443 } 444 case CEED_RESTRICTION_POINTS: 445 data->indices.outputs[i] = (CeedInt *)rstr_data->d_offsets; 446 break; 447 // LCOV_EXCL_START 448 case CEED_RESTRICTION_ORIENTED: 449 case CEED_RESTRICTION_CURL_ORIENTED: 450 break; // TODO: Not implemented 451 // LCOV_EXCL_STOP 452 } 453 } 454 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 455 return CEED_ERROR_SUCCESS; 456 } 457 458 //------------------------------------------------------------------------------ 459 // Basis 460 //------------------------------------------------------------------------------ 461 static int CeedOperatorBuildKernelBasis_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt i, CeedInt dim, 462 CeedOperatorField op_field, CeedQFunctionField qf_field, CeedInt Q_1d, bool is_input, bool is_tensor, 463 bool is_at_points, bool use_3d_slices) { 464 std::string var_suffix = (is_input ? "_in_" : "_out_") + std::to_string(i); 465 std::string P_name = (is_tensor ? 
"P_1d" : "P") + var_suffix, Q_name = is_tensor ? "Q_1d" : "Q"; 466 CeedEvalMode eval_mode = CEED_EVAL_NONE; 467 CeedInt elem_size = 0, num_comp = 0, P_1d = 0; 468 CeedElemRestriction elem_rstr; 469 CeedBasis basis; 470 471 // Get field data 472 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_field, &elem_rstr)); 473 if (elem_rstr != CEED_ELEMRESTRICTION_NONE) { 474 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 475 CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp)); 476 } 477 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 478 CeedCallBackend(CeedOperatorFieldGetBasis(op_field, &basis)); 479 if (basis != CEED_BASIS_NONE) { 480 if (is_tensor) CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d)); 481 else CeedCallBackend(CeedBasisGetNumNodes(basis, &P_1d)); 482 } 483 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_field, &eval_mode)); 484 485 // Basis 486 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 487 if (is_input) { 488 switch (eval_mode) { 489 case CEED_EVAL_NONE: 490 if (!use_3d_slices && !is_at_points) { 491 code << " CeedScalar *r_q" << var_suffix << " = r_e" << var_suffix << ";\n"; 492 } 493 break; 494 case CEED_EVAL_INTERP: 495 if (is_at_points) { 496 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 497 498 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 499 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix 500 << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n"; 501 } else { 502 std::string function_name = is_tensor ? ((dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d") : "InterpNonTensor"; 503 504 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? 
Q_name : "1") << "];\n"; 505 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix 506 << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n"; 507 } 508 break; 509 case CEED_EVAL_GRAD: 510 if (is_at_points) { 511 std::string function_name = (dim == 1 ? "Interp" : "InterpTensor") + std::to_string(dim) + "d"; 512 513 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 514 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix 515 << ", s_B" << var_suffix << ", r_c" << var_suffix << ");\n"; 516 } else if (use_3d_slices) { 517 std::string function_name = (dim > 1 ? "InterpTensor" : "Interp") + std::to_string(dim) + "d"; 518 519 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 520 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix 521 << ", s_B" << var_suffix << ", r_q" << var_suffix << ");\n"; 522 } else if (is_tensor) { 523 bool is_collocated = dim == 3 && Q_1d >= P_1d; 524 std::string function_name = (dim == 1 ? "Grad" : (is_collocated ? "GradTensorCollocated" : "GradTensor")) + std::to_string(dim) + "d"; 525 526 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (dim >= 3 ? 
Q_name : "1") << "];\n"; 527 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" << var_suffix 528 << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 529 } else { 530 std::string function_name = "GradNonTensor"; 531 532 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 533 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_e" 534 << var_suffix << ", s_G" << var_suffix << ", r_q" << var_suffix << ");\n"; 535 } 536 break; 537 case CEED_EVAL_WEIGHT: { 538 if (is_at_points) { 539 code << " // Nothing to do AtPoints\n"; 540 } else { 541 CeedBasis_Hip_shared *basis_data; 542 std::string function_name = is_tensor ? ((dim == 1 ? "Weight" : "WeightTensor") + std::to_string(dim) + "d") : "WeightNonTensor"; 543 544 code << " CeedScalar r_q" << var_suffix << "[" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 545 CeedCallBackend(CeedBasisGetData(basis, &basis_data)); 546 data->W = basis_data->d_q_weight_1d; 547 code << " " << function_name << "<" << Q_name << ">(data, W, r_q" << var_suffix << ");\n"; 548 } 549 break; 550 } 551 // LCOV_EXCL_START 552 case CEED_EVAL_DIV: 553 case CEED_EVAL_CURL: 554 break; // TODO: Not implemented 555 // LCOV_EXCL_STOP 556 } 557 } else { 558 switch (eval_mode) { 559 case CEED_EVAL_NONE: 560 code << " CeedScalar *r_e" << var_suffix << " = r_q" << var_suffix << ";\n"; 561 break; // No action 562 case CEED_EVAL_INTERP: 563 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 564 if (is_at_points) { 565 std::string function_name = (dim == 1 ? 
"InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 566 567 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix 568 << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n"; 569 } else { 570 std::string function_name = 571 is_tensor ? ((dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d") : "InterpTransposeNonTensor"; 572 573 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix 574 << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n"; 575 } 576 break; 577 case CEED_EVAL_GRAD: 578 code << " CeedScalar *r_e" << var_suffix << " = r_e_scratch;\n"; 579 if (is_at_points) { 580 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 581 582 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_c" << var_suffix 583 << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n"; 584 } else if (use_3d_slices) { 585 std::string function_name = (dim == 1 ? "InterpTranspose" : "InterpTransposeTensor") + std::to_string(dim) + "d"; 586 587 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix 588 << ", s_B" << var_suffix << ", r_e" << var_suffix << ");\n"; 589 } else if (is_tensor) { 590 bool is_collocated = dim == 3 && Q_1d >= P_1d; 591 std::string function_name = 592 (dim == 1 ? "GradTranspose" : (is_collocated ? 
"GradTransposeTensorCollocated" : "GradTransposeTensor")) + std::to_string(dim) + "d"; 593 594 code << " " << function_name << "<num_comp" << var_suffix << ", " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" << var_suffix 595 << ", s_B" << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 596 } else { 597 std::string function_name = "GradTransposeNonTensor"; 598 599 code << " " << function_name << "<num_comp" << var_suffix << ", dim, " << P_name << ", " << Q_name << ", OP_T_1D>(data, r_q" 600 << var_suffix << ", s_G" << var_suffix << ", r_e" << var_suffix << ");\n"; 601 } 602 break; 603 // LCOV_EXCL_START 604 case CEED_EVAL_WEIGHT: 605 break; // Should not occur 606 case CEED_EVAL_DIV: 607 case CEED_EVAL_CURL: 608 break; // TODO: Not implemented 609 // LCOV_EXCL_STOP 610 } 611 } 612 CeedCallBackend(CeedBasisDestroy(&basis)); 613 return CEED_ERROR_SUCCESS; 614 } 615 616 //------------------------------------------------------------------------------ 617 // QFunction 618 //------------------------------------------------------------------------------ 619 static int CeedOperatorBuildKernelQFunction_Hip_gen(std::ostringstream &code, CeedOperator_Hip_gen *data, CeedInt dim, CeedInt max_num_points, 620 CeedInt num_input_fields, CeedOperatorField *op_input_fields, CeedQFunctionField *qf_input_fields, 621 CeedInt num_output_fields, CeedOperatorField *op_output_fields, 622 CeedQFunctionField *qf_output_fields, std::string qfunction_name, CeedInt Q_1d, bool is_tensor, 623 bool is_at_points, bool use_3d_slices) { 624 std::string Q_name = is_tensor ? 
"Q_1d" : "Q"; 625 CeedEvalMode eval_mode = CEED_EVAL_NONE; 626 CeedElemRestriction elem_rstr; 627 628 // Setup output arrays 629 code << "\n // -- Output field setup\n"; 630 for (CeedInt i = 0; i < num_output_fields; i++) { 631 const char *field_name; 632 std::string var_suffix = "_out_" + std::to_string(i); 633 634 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 635 code << " // ---- Output field " << i << ": " << field_name << "\n"; 636 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 637 switch (eval_mode) { 638 case CEED_EVAL_NONE: 639 if (is_at_points) { 640 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "];\n"; 641 } else { 642 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 643 } 644 break; 645 case CEED_EVAL_INTERP: 646 if (is_at_points) { 647 // Accumulator for point data 648 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "];\n"; 649 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "; i++) {\n"; 650 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 651 code << " }\n"; 652 } else { 653 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 654 } 655 break; 656 case CEED_EVAL_GRAD: 657 if (is_at_points) { 658 // Accumulator for point data 659 code << " CeedScalar r_c" << var_suffix << "[num_comp" << var_suffix << "*" << (dim >= 3 ? Q_name : "1") << "*dim];\n"; 660 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << (dim >= 3 ? 
Q_name : "1") << "; i++) {\n"; 661 code << " r_c" << var_suffix << "[i] = 0.0;\n"; 662 code << " }\n"; 663 } else if (use_3d_slices) { 664 // Accumulator for gradient slices 665 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*" << Q_name << "];\n"; 666 code << " for (CeedInt i = 0; i < num_comp" << var_suffix << "*" << Q_name << "; i++) {\n"; 667 code << " r_q" << var_suffix << "[i] = 0.0;\n"; 668 code << " }\n"; 669 } else { 670 code << " CeedScalar r_q" << var_suffix << "[num_comp" << var_suffix << "*dim*" << (is_tensor && (dim >= 3) ? Q_name : "1") << "];\n"; 671 } 672 break; 673 case CEED_EVAL_WEIGHT: 674 break; 675 // LCOV_EXCL_START 676 case CEED_EVAL_DIV: 677 case CEED_EVAL_CURL: 678 break; // TODO: Not implemented 679 // LCOV_EXCL_STOP 680 } 681 } 682 683 if (is_at_points) { 684 // We need to handle batches of points 685 code << "\n // Note: Using batches of points\n"; 686 code << " const CeedInt point_loop_bound = (blockDim.x * blockDim.y) * ceil(1.0 * max_num_points / (blockDim.x * blockDim.y));\n\n"; 687 code << " #pragma unroll\n"; 688 code << " for (CeedInt i = threadIdx.x + threadIdx.y * blockDim.x; i < point_loop_bound; i += blockDim.x * blockDim.y) {\n"; 689 code << " const CeedInt p = i % max_num_points;\n\n"; 690 691 code << " // -- Coordinates\n"; 692 code << " CeedScalar r_x[dim];\n"; 693 code << " ReadPoint<dim, coords_comp_stride, max_num_points>(data, elem, p, max_num_points, points.indices, points.coords, r_x);\n\n"; 694 695 code << " // -- Input fields\n"; 696 for (CeedInt i = 0; i < num_input_fields; i++) { 697 const char *field_name; 698 std::string var_suffix = "_in_" + std::to_string(i); 699 std::string P_name = "P_1d" + var_suffix; 700 701 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 702 code << " // ---- Input field " << i << ": " << field_name << "\n"; 703 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 704 // Basis action 705 code << " // 
EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 706 switch (eval_mode) { 707 case CEED_EVAL_NONE: 708 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 709 code << " ReadPoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 710 << ", max_num_points>(data, elem, p, max_num_points, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 711 break; 712 case CEED_EVAL_INTERP: 713 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 714 code << " InterpAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name << ">(data, i, r_c" 715 << var_suffix << ", r_x, r_s" << var_suffix << ");\n"; 716 break; 717 case CEED_EVAL_GRAD: 718 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 719 code << " GradAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name << ">(data, i, r_c" 720 << var_suffix << ", r_x, r_s" << var_suffix << ");\n"; 721 break; 722 case CEED_EVAL_WEIGHT: 723 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 724 code << " r_s" << var_suffix << "[0] = 1.0;\n"; 725 break; 726 // LCOV_EXCL_START 727 case CEED_EVAL_DIV: 728 case CEED_EVAL_CURL: 729 break; // TODO: Not implemented 730 // LCOV_EXCL_STOP 731 } 732 } 733 code << "\n // -- Output fields\n"; 734 for (CeedInt i = 0; i < num_output_fields; i++) { 735 const char *field_name; 736 std::string var_suffix = "_out_" + std::to_string(i); 737 738 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 739 code << " // ---- Output field " << i << ": " << field_name << "\n"; 740 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 741 // Basis action 742 switch (eval_mode) { 743 case CEED_EVAL_NONE: 744 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 745 break; 746 case CEED_EVAL_INTERP: 747 code << " CeedScalar r_s" << 
var_suffix << "[num_comp" << var_suffix << "];\n"; 748 break; 749 case CEED_EVAL_GRAD: 750 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 751 break; 752 // LCOV_EXCL_START 753 case CEED_EVAL_WEIGHT: 754 break; // Should not occur 755 case CEED_EVAL_DIV: 756 case CEED_EVAL_CURL: 757 break; // TODO: Not implemented 758 // LCOV_EXCL_STOP 759 } 760 } 761 762 } else if (use_3d_slices) { 763 // We treat quadrature points per slice in 3d to save registers 764 code << "\n // Note: Using planes of 3D elements\n"; 765 code << " #pragma unroll\n"; 766 code << " for (CeedInt q = 0; q < " << Q_name << "; q++) {\n"; 767 code << " // -- Input fields\n"; 768 for (CeedInt i = 0; i < num_input_fields; i++) { 769 const char *field_name; 770 std::string var_suffix = "_in_" + std::to_string(i); 771 772 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 773 code << " // ---- Input field " << i << ": " << field_name << "\n"; 774 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 775 // Basis action 776 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 777 switch (eval_mode) { 778 case CEED_EVAL_NONE: 779 bool is_strided; 780 781 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 782 783 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 784 CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided)); 785 if (is_strided) { 786 bool has_backend_strides; 787 CeedInt num_elem, elem_size; 788 789 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 790 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides)); 791 CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem)); 792 CeedInt strides[3] = {1, elem_size * num_elem, elem_size}; 793 794 if (!has_backend_strides) { 795 CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, strides)); 796 } 797 code 
<< " // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n"; 798 code << " ReadEVecSliceStrided3d<num_comp" << var_suffix << ", " << Q_name << ", " << strides[0] << ", " << strides[1] << ", " 799 << strides[2] << ">(data, elem, q, d" << var_suffix << ", r_s" << var_suffix << ");\n"; 800 } else { 801 CeedSize l_size = 0; 802 CeedInt comp_stride; 803 CeedElemRestriction_Hip *rstr_data; 804 805 CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size)); 806 code << " const CeedInt l_size" << var_suffix << " = " << l_size << ";\n"; 807 CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 808 code << " // CompStride: " << comp_stride << "\n"; 809 CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data)); 810 data->indices.inputs[i] = (CeedInt *)rstr_data->d_offsets; 811 code << " ReadEVecSliceStandard3d<num_comp" << var_suffix << ", " << comp_stride << ", " << Q_name << ">(data, l_size" << var_suffix 812 << ", elem, q, indices.inputs[" << i << "], d" << var_suffix << ", r_s" << var_suffix << ");\n"; 813 } 814 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 815 break; 816 case CEED_EVAL_INTERP: 817 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 818 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) {\n"; 819 code << " r_s" << var_suffix << "[j] = r_q" << var_suffix << "[q + j*" << Q_name << "];\n"; 820 code << " }\n"; 821 break; 822 case CEED_EVAL_GRAD: 823 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 824 code << " GradColloSlice3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_q" << var_suffix << ", s_G" 825 << var_suffix << ", r_s" << var_suffix << ");\n"; 826 break; 827 case CEED_EVAL_WEIGHT: 828 code << " CeedScalar r_s" << var_suffix << "[1];\n"; 829 code << " r_s" << var_suffix << "[0] = r_q" << var_suffix << "[q];\n"; 830 break; 831 // LCOV_EXCL_START 832 case CEED_EVAL_DIV: 
833 case CEED_EVAL_CURL: 834 break; // TODO: Not implemented 835 // LCOV_EXCL_STOP 836 } 837 } 838 code << "\n // -- Output fields\n"; 839 for (CeedInt i = 0; i < num_output_fields; i++) { 840 const char *field_name; 841 std::string var_suffix = "_out_" + std::to_string(i); 842 843 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 844 code << " // ---- Output field " << i << ": " << field_name << "\n"; 845 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 846 // Basis action 847 switch (eval_mode) { 848 case CEED_EVAL_NONE: 849 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 850 break; 851 case CEED_EVAL_INTERP: 852 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "];\n"; 853 break; 854 case CEED_EVAL_GRAD: 855 code << " CeedScalar r_s" << var_suffix << "[num_comp" << var_suffix << "*dim];\n"; 856 break; 857 // LCOV_EXCL_START 858 case CEED_EVAL_WEIGHT: 859 break; // Should not occur 860 case CEED_EVAL_DIV: 861 case CEED_EVAL_CURL: 862 break; // TODO: Not implemented 863 // LCOV_EXCL_STOP 864 } 865 } 866 } else { 867 code << "\n // Note: Using full elements\n"; 868 code << " {\n"; 869 code << " // -- Input fields\n"; 870 for (CeedInt i = 0; i < num_input_fields; i++) { 871 const char *field_name; 872 873 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 874 code << " // ---- Input field " << i << ": " << field_name << "\n"; 875 code << " CeedScalar *r_s_in_" << i << " = r_q_in_" << i << ";\n"; 876 } 877 code << " // -- Output fields\n"; 878 for (CeedInt i = 0; i < num_output_fields; i++) { 879 const char *field_name; 880 881 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 882 code << " // ---- Output field " << i << ": " << field_name << "\n"; 883 code << " CeedScalar *r_s_out_" << i << " = r_q_out_" << i << ";\n"; 884 } 885 } 886 887 // Input and output buffers 888 code << "\n // -- QFunction inputs 
and outputs\n"; 889 code << " // ---- Inputs\n"; 890 code << " CeedScalar *inputs[" << CeedIntMax(num_input_fields, 1) << "];\n"; 891 for (CeedInt i = 0; i < num_input_fields; i++) { 892 const char *field_name; 893 894 CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[i], &field_name)); 895 code << " // ------ Input field " << i << ": " << field_name << "\n"; 896 code << " inputs[" << i << "] = r_s_in_" << i << ";\n"; 897 } 898 code << " // ---- Outputs\n"; 899 code << " CeedScalar *outputs[" << CeedIntMax(num_output_fields, 1) << "];\n"; 900 for (CeedInt i = 0; i < num_output_fields; i++) { 901 const char *field_name; 902 903 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 904 code << " // ------ Output field " << i << ": " << field_name << "\n"; 905 code << " outputs[" << i << "] = r_s_out_" << i << ";\n"; 906 } 907 908 // Apply QFunction 909 code << "\n // -- Apply QFunction\n"; 910 code << " " << qfunction_name << "(ctx, "; 911 if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) { 912 code << "1"; 913 } else { 914 code << Q_name; 915 } 916 code << ", inputs, outputs);\n"; 917 918 if (is_at_points) { 919 // Map back to coefficients 920 code << "\n // -- Output fields\n"; 921 for (CeedInt i = 0; i < num_output_fields; i++) { 922 const char *field_name; 923 std::string var_suffix = "_out_" + std::to_string(i); 924 std::string P_name = "P_1d" + var_suffix; 925 926 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 927 code << " // ---- Output field " << i << ": " << field_name << "\n"; 928 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 929 // Basis action 930 code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n"; 931 switch (eval_mode) { 932 case CEED_EVAL_NONE: { 933 CeedInt comp_stride; 934 CeedElemRestriction elem_rstr; 935 936 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 937 
CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride)); 938 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 939 code << " const CeedInt comp_stride" << var_suffix << " = " << comp_stride << ";\n"; 940 code << " WritePoint<num_comp" << var_suffix << ", comp_stride" << var_suffix 941 << ", max_num_points>(data, elem, i, points.num_per_elem[elem], indices.outputs[" << i << "]" 942 << ", r_s" << var_suffix << ", d" << var_suffix << ");\n"; 943 break; 944 } 945 case CEED_EVAL_INTERP: 946 code << " if (i >= points.num_per_elem[elem]) {\n"; 947 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 948 code << " }\n"; 949 code << " InterpTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 950 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 951 break; 952 case CEED_EVAL_GRAD: 953 code << " if (i >= points.num_per_elem[elem]) {\n"; 954 code << " for (CeedInt j = 0; j < num_comp" << var_suffix << "*dim; j++) r_s" << var_suffix << "[j] = 0.0;\n"; 955 code << " }\n"; 956 code << " GradTransposeAtPoints" << dim << "d<num_comp" << var_suffix << ", max_num_points, " << P_name << ", " << Q_name 957 << ">(data, i, r_s" << var_suffix << ", r_x, r_c" << var_suffix << ");\n"; 958 break; 959 // LCOV_EXCL_START 960 case CEED_EVAL_WEIGHT: 961 break; // Should not occur 962 case CEED_EVAL_DIV: 963 case CEED_EVAL_CURL: 964 break; // TODO: Not implemented 965 // LCOV_EXCL_STOP 966 } 967 } 968 } else if (use_3d_slices) { 969 // Copy or apply transpose grad, if needed 970 code << "\n // -- Output fields\n"; 971 for (CeedInt i = 0; i < num_output_fields; i++) { 972 const char *field_name; 973 std::string var_suffix = "_out_" + std::to_string(i); 974 std::string P_name = "P_1d" + var_suffix; 975 976 CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name)); 977 code << " // ---- Output field " << i << ": " << 
field_name << "\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << " // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          // Copy QFunction output for this slice back into the per-element quadrature register
          code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << " }\n";
          break;
        case CEED_EVAL_INTERP:
          // Same copy as CEED_EVAL_NONE; interpolation transpose is applied later
          code << " for (CeedInt j = 0; j < num_comp" << var_suffix << " ; j++) {\n";
          code << " r_q" << var_suffix << "[q + j*" << Q_name << "] = r_s" << var_suffix << "[j];\n";
          code << " }\n";
          break;
        case CEED_EVAL_GRAD:
          // Apply transpose of the collocated-gradient slice operator for this quadrature plane
          code << " GradColloSliceTranspose3d<num_comp" << var_suffix << ", " << Q_name << ", OP_T_1D>(data, q, r_s" << var_suffix << ", s_G"
               << var_suffix << ", r_q" << var_suffix << ");\n";
          break;
        // LCOV_EXCL_START
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
      }
    }
  }
  code << " }\n";
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Build single operator kernel
//
// Generates the source for, JiT compiles, and loads the single fused operator
// kernel for this CeedOperator. On exit, *is_good_build reports whether a
// usable kernel was produced; on failure data->use_fallback is set so that
// later calls (after setup is done) also report a bad build and the caller can
// fall back to the reference backend.
//------------------------------------------------------------------------------
extern "C" int CeedOperatorBuildKernel_Hip_gen(CeedOperator op, bool *is_good_build) {
  bool is_tensor = true, is_at_points = false, use_3d_slices = false;
  Ceed ceed;
  CeedInt Q_1d, num_input_fields, num_output_fields, dim = 1, max_num_points = 0, coords_comp_stride = 0;
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction_Hip_gen *qf_data;
  CeedQFunction qf;
  CeedOperatorField *op_input_fields, *op_output_fields;
  CeedOperator_Hip_gen *data;
  std::ostringstream code;

  CeedCallBackend(CeedOperatorGetData(op, &data));
  {
    bool is_setup_done;

    // If the kernel was already built (or a build already failed), report the
    // cached result instead of rebuilding
    CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
    if (is_setup_done) {
      *is_good_build = !data->use_fallback;
      return CEED_ERROR_SUCCESS;
    }
  }

  // Check field compatibility
  // All active bases must come from the "/gpu/hip/shared" backend and must be
  // uniformly tensor or uniformly non-tensor; otherwise fall back to ref
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  {
    bool has_shared_bases = true, is_all_tensor = true, is_all_nontensor = true;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool is_tensor = true;
        const char *resource;
        char *resource_root;
        Ceed basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;
        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }

    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedBasis basis;

      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        bool is_tensor = true;
        const char *resource;
        char *resource_root;
        Ceed basis_ceed;

        CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
        is_all_tensor = is_all_tensor && is_tensor;
        is_all_nontensor = is_all_nontensor && !is_tensor;

        CeedCallBackend(CeedBasisGetCeed(basis, &basis_ceed));
        CeedCallBackend(CeedGetResource(basis_ceed, &resource));
        CeedCallBackend(CeedGetResourceRoot(basis_ceed, resource, ":", &resource_root));
        has_shared_bases = has_shared_bases && !strcmp(resource_root, "/gpu/hip/shared");
        CeedCallBackend(CeedFree(&resource_root));
        CeedCallBackend(CeedDestroy(&basis_ceed));
      }
      CeedCallBackend(CeedBasisDestroy(&basis));
    }
    // -- Fallback to ref if not all bases are shared
    if (!has_shared_bases || (!is_all_tensor && !is_all_nontensor)) {
      *is_good_build = false;
      return CEED_ERROR_SUCCESS;
    }
  }
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Get operator data
  CeedCallBackend(CeedOperatorIsAtPoints(op, &is_at_points));
  CeedCallBackend(CeedOperatorBuildKernelData_Hip_gen(ceed, num_input_fields, op_input_fields, qf_input_fields, num_output_fields, op_output_fields,
                                                      qf_output_fields, &data->max_P_1d, &Q_1d, &dim, &is_tensor, &use_3d_slices));
  // dim == 0 means no active basis set the dimension; default to 1D
  if (dim == 0) dim = 1;
  data->dim = dim;
  if (is_at_points) {
    CeedElemRestriction_Hip *rstr_data;
    CeedElemRestriction rstr_points = NULL;

    // Stash point counts, coordinate component stride, and the device offsets
    // of the points restriction for use by the generated kernel
    CeedCallBackend(CeedOperatorAtPointsGetPoints(op, &rstr_points, NULL));
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr_points, &max_num_points));
    CeedCallBackend(CeedElemRestrictionGetCompStride(rstr_points, &coords_comp_stride));
    CeedCallBackend(CeedElemRestrictionGetData(rstr_points, &rstr_data));
    data->points.indices = (CeedInt *)rstr_data->d_offsets;
    CeedCallBackend(CeedElemRestrictionDestroy(&rstr_points));
  }
  // The 3D slice optimization is not used for AtPoints operators
  if (is_at_points) use_3d_slices = false;
  if (Q_1d == 0) {
    if (is_at_points) Q_1d = max_num_points;
    else CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q_1d));
  }
  data->Q_1d = Q_1d;

  // Check for restriction only identity operator
  {
    bool is_identity_qf;

    CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
    if (is_identity_qf) {
      CeedEvalMode eval_mode_in, eval_mode_out;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
      CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
                "Backend does not implement restriction only identity operators");
    }
  }

  // Load basis source files
  if (is_tensor) {
    code << "// Tensor basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-templates.h>\n\n";
  } else {
    code << "// Non-tensor basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-nontensor-templates.h>\n\n";
  }
  if (is_at_points) {
    code << "// AtPoints basis source\n";
    code << "#include <ceed/jit-source/hip/hip-shared-basis-tensor-at-points-templates.h>\n\n";
  }
  code << "// CodeGen operator source\n";
  code << "#include <ceed/jit-source/hip/hip-gen-templates.h>\n\n";

  // Get QFunction name
  std::string qfunction_name(qf_data->qfunction_name);
  std::string operator_name;

  operator_name = "CeedKernelHipGenOperator_" + qfunction_name;

  // Define CEED_Q_VLA
  // The QFunction is called with Q_1d points at once only for tensor 3D
  // operators without AtPoints/3D-slice handling; otherwise one point at a time
  code << "\n#undef CEED_Q_VLA\n";
  if (dim != 3 || is_at_points || use_3d_slices || !is_tensor) {
    code << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  // Add user QFunction source
  {
    const char *source_path;

    CeedCallBackend(CeedQFunctionGetSourcePath(qf, &source_path));
    CeedCheck(source_path, ceed, CEED_ERROR_UNSUPPORTED, "/gpu/hip/gen backend requires QFunction source code file");

    code << "// User QFunction source\n";
    code << "#include \"" << source_path << "\"\n\n";
  }

  // Setup
  code << "\n// -----------------------------------------------------------------------------\n";
  code << "// Operator Kernel\n";
  code << "// \n";
  code << "// d_[in,out]_i: CeedVector device array\n";
  code << "// r_[in,out]_e_i: Element vector register\n";
  code << "// r_[in,out]_q_i: Quadrature space vector register\n";
  code << "// r_[in,out]_c_i: AtPoints Chebyshev coefficients register\n";
  code << "// r_[in,out]_s_i: Quadrature space slice vector register\n";
  code << "// \n";
  code << "// s_B_[in,out]_i: Interpolation matrix, shared memory\n";
  code << "// s_G_[in,out]_i: Gradient matrix, shared memory\n";
  code << "// -----------------------------------------------------------------------------\n";
  code << "\nextern \"C\" __launch_bounds__(BLOCK_SIZE)\n";
  code << "__global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Hip indices, Fields_Hip fields, Fields_Hip B, Fields_Hip G, CeedScalar* W, Points_Hip points) {\n";

  // Scratch buffers
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << " const CeedScalar *__restrict__ d_in_" << i << " = fields.inputs[" << i << "];\n";
    }
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << " CeedScalar *__restrict__ d_out_" << i << " = fields.outputs[" << i << "];\n";
  }

  code << " const CeedInt dim = " << dim << ";\n";
  code << " const CeedInt " << (is_tensor ? "Q_1d" : "Q") << " = " << Q_1d << ";\n";
  if (is_at_points) {
    code << " const CeedInt max_num_points = " << max_num_points << ";\n";
    code << " const CeedInt coords_comp_stride = " << coords_comp_stride << ";\n";
  }

  // Shared data
  // Each z-slice of the thread block works on its own element; slice memory is
  // partitioned per t_id_z (OP_T_1D entries in 1D, OP_T_1D^2 otherwise)
  code << " extern __shared__ CeedScalar slice[];\n";
  code << " SharedData_Hip data;\n";
  code << " data.t_id_x = threadIdx.x;\n";
  code << " data.t_id_y = threadIdx.y;\n";
  code << " data.t_id_z = threadIdx.z;\n";
  code << " data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
  code << " data.slice = slice + data.t_id_z*OP_T_1D" << ((!is_tensor || dim == 1) ? "" : "*OP_T_1D") << ";\n";

  // -- Determine input mat reuse
  // An input field may reuse the B/G matrices of an earlier input that shares
  // the same CeedBasis (for non-tensor, only with the same eval mode)
  FieldReuse_Hip input_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    input_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode_i));
    if (eval_mode_i == CEED_EVAL_WEIGHT) continue;
    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis_i));
    for (CeedInt j = 0; (input_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          input_matrix_reuse[i].index = j;
          input_matrix_reuse[i].is_input = true;
          input_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            input_matrix_reuse[i].index = j;
            input_matrix_reuse[i].is_input = true;
            input_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // -- Determine output mat reuse
  // Outputs may reuse matrices from any input first, then from earlier outputs
  FieldReuse_Hip output_matrix_reuse[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_output_fields; i++) {
    output_matrix_reuse[i].index = -1;
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedEvalMode eval_mode_i;
    CeedBasis basis_i;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode_i));
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis_i));
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < num_input_fields); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index = j;
          output_matrix_reuse[i].is_input = true;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index = j;
            output_matrix_reuse[i].is_input = true;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    for (CeedInt j = 0; (output_matrix_reuse[i].index == -1) && (j < i); j++) {
      CeedEvalMode eval_mode_j;
      CeedBasis basis_j;

      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[j], &eval_mode_j));
      if (eval_mode_j == CEED_EVAL_WEIGHT) continue;
      CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[j], &basis_j));
      if (basis_i == basis_j) {
        if (is_tensor) {
          output_matrix_reuse[i].index = j;
          output_matrix_reuse[i].is_input = false;
          output_matrix_reuse[i].eval_mode = eval_mode_j;
        } else {
          // For non-tensor can only re-use with the same eval mode
          if (eval_mode_i == eval_mode_j) {
            output_matrix_reuse[i].index = j;
            output_matrix_reuse[i].is_input = false;
            output_matrix_reuse[i].eval_mode = eval_mode_j;
          }
        }
      }
      CeedCallBackend(CeedBasisDestroy(&basis_j));
    }
    CeedCallBackend(CeedBasisDestroy(&basis_i));
  }

  // Initialize constants, and matrices B and G
  code << "\n // Input field constants and basis data\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_input_fields[i], qf_input_fields[i], input_matrix_reuse[i], Q_1d, true,
                                                             is_tensor, is_at_points, use_3d_slices));
  }
  code << "\n // Output field constants and basis data\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorBuildKernelFieldData_Hip_gen(code, data, i, op_output_fields[i], qf_output_fields[i], output_matrix_reuse[i], Q_1d,
                                                             false, is_tensor, is_at_points, use_3d_slices));
  }

  // Loop over all elements
  // Grid-stride style loop over elements: blockDim.z elements per block
  code << "\n // Element loop\n";
  code << " __syncthreads();\n";
  code << " for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";

  // -- Compute minimum buffer space needed
  // Size the shared scratch restriction buffer to the largest field footprint
  CeedInt max_rstr_buffer_size = 1;

  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedInt num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedInt num_comp, elem_size;
    CeedElemRestriction elem_rstr;

    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    max_rstr_buffer_size = CeedIntMax(max_rstr_buffer_size, num_comp * (is_tensor && (dim >= 3) ? elem_size : 1));
    CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr));
  }
  code << " // Scratch restriction buffer space\n";
  code << " CeedScalar r_e_scratch[" << max_rstr_buffer_size << "];\n";

  // -- Determine best input field processing order
  // Group fields sharing the same (restriction, vector) pair so the restriction
  // result loaded into the scratch buffer can be reused by later fields
  CeedInt field_rstr_in_buffer[CEED_FIELD_MAX], input_field_order[CEED_FIELD_MAX];

  for (CeedInt i = 0; i < num_input_fields; i++) {
    field_rstr_in_buffer[i] = -1;
    input_field_order[i] = -1;
  }
  {
    bool is_ordered[CEED_FIELD_MAX];
    CeedInt curr_index = 0;

    for (CeedInt i = 0; i < num_input_fields; i++) is_ordered[i] = false;
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedVector vec_i;
      CeedElemRestriction rstr_i;

      if (is_ordered[i]) continue;
      field_rstr_in_buffer[i] = i;
      is_ordered[i] = true;
      input_field_order[curr_index] = i;
      curr_index++;
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec_i));
      if (vec_i == CEED_VECTOR_NONE) continue;  // CEED_EVAL_WEIGHT
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr_i));
      for (CeedInt j = i + 1; j < num_input_fields; j++) {
        CeedVector vec_j;
        CeedElemRestriction rstr_j;

        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[j], &vec_j));
        CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[j], &rstr_j));
        if (rstr_i == rstr_j && vec_i == vec_j) {
          // Field j reads the same data as field i; schedule it immediately after
          field_rstr_in_buffer[j] = i;
          is_ordered[j] = true;
          input_field_order[curr_index] = j;
          curr_index++;
        }
        CeedCallBackend(CeedVectorDestroy(&vec_j));
        CeedCallBackend(CeedElemRestrictionDestroy(&rstr_j));
      }
      CeedCallBackend(CeedVectorDestroy(&vec_i));
      CeedCallBackend(CeedElemRestrictionDestroy(&rstr_i));
    }
  }

  // -- Input restriction and basis
  code << "\n // -- Input field restrictions and basis actions\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    const char *field_name;
    const CeedInt f = input_field_order[i];

    CeedCallBackend(CeedOperatorFieldGetName(op_input_fields[f], &field_name));
    code << " // ---- Input field " << f << ": " << field_name << "\n";

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, f, dim, field_rstr_in_buffer, op_input_fields[f], qf_input_fields[f], Q_1d,
                                                               true, is_tensor, is_at_points, use_3d_slices));

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, f, dim, op_input_fields[f], qf_input_fields[f], Q_1d, true, is_tensor,
                                                         is_at_points, use_3d_slices));
  }

  // -- Q function
  CeedCallBackend(CeedOperatorBuildKernelQFunction_Hip_gen(code, data, dim, max_num_points, num_input_fields, op_input_fields, qf_input_fields,
                                                           num_output_fields, op_output_fields, qf_output_fields, qfunction_name, Q_1d, is_tensor,
                                                           is_at_points, use_3d_slices));

  // -- Output basis and restriction
  code << "\n // -- Output field basis action and restrictions\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    const char *field_name;

    CeedCallBackend(CeedOperatorFieldGetName(op_output_fields[i], &field_name));
    code << " // ---- Output field " << i << ": " << field_name << "\n";

    // ---- Basis action
    CeedCallBackend(CeedOperatorBuildKernelBasis_Hip_gen(code, data, i, dim, op_output_fields[i], qf_output_fields[i], Q_1d, false, is_tensor,
                                                         is_at_points, use_3d_slices));

    // ---- Restriction
    CeedCallBackend(CeedOperatorBuildKernelRestriction_Hip_gen(code, data, i, dim, NULL, op_output_fields[i], qf_output_fields[i], Q_1d, false,
                                                               is_tensor, is_at_points, use_3d_slices));
  }

  // Close loop and function
  code << " }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  CeedInt block_sizes[3] = {0, 0, 0};
  CeedInt num_elem;

  // Compile
  // OP_T_1D and BLOCK_SIZE are baked into the JiT compile as -D constants;
  // a failed compile marks the operator for fallback rather than erroring
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(BlockGridCalculate_Hip_gen(is_tensor ? dim : 1, num_elem, data->max_P_1d, Q_1d, block_sizes));
  // AtPoints kernels process one element per block in z
  if (is_at_points) block_sizes[2] = 1;
  {
    bool is_compile_good = false;

    CeedCallBackend(CeedTryCompile_Hip(ceed, code.str().c_str(), &is_compile_good, &data->module, 2, "OP_T_1D", block_sizes[0], "BLOCK_SIZE",
                                       block_sizes[0] * block_sizes[1] * block_sizes[2]));
    if (is_compile_good) {
      *is_good_build = true;
      CeedCallBackend(CeedGetKernel_Hip(ceed, data->module, operator_name.c_str(), &data->op));
    } else {
      *is_good_build = false;
      data->use_fallback = true;
    }
  }
  CeedCallBackend(CeedOperatorSetSetupDone(op));
  CeedCallBackend(CeedDestroy(&ceed));
  CeedCallBackend(CeedQFunctionDestroy(&qf));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------