// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#define CEED_DEBUG_COLOR 12

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda_runtime.h>

#include <iostream>
#include <sstream>
#include <string>

#include "../cuda-ref/ceed-cuda-ref.h"
#include "../cuda-shared/ceed-cuda-shared.h"
#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-gen.h"

//------------------------------------------------------------------------------
// Build single operator kernel
//
// Generates the CUDA C source for one fused operator kernel (restriction +
// basis action + QFunction + transpose basis + transpose restriction, all in
// a single __global__ kernel) and JIT-compiles it.
//
// The emitted source is assembled into an ostringstream in this order:
//   1. atomicAdd fallback (only for pre-SM60 devices with non-FP32 scalars)
//   2. shared-memory tensor basis templates (cuda-shared-basis-tensor-templates.h)
//   3. cuda-gen templates (cuda-gen-templates.h) — provides loadMatrix,
//      readDofs*/writeDofs*, Interp*/Grad*/Weight* device helpers used below
//   4. the user QFunction source, preceded by a CEED_Q_VLA definition
//   5. the generated kernel "CeedKernelCudaGenOperator_<qfunction name>"
//
// Alongside code generation, device pointers for each field's basis matrices
// (data->B, data->G), restriction indices (data->indices), and quadrature
// weights (data->W) are captured host-side into the operator data struct —
// presumably forwarded as kernel arguments at launch time; confirm in the
// Apply path of this backend.
//
// The kernel is compiled with T_1D = max(Q_1d, max P_1d) so the shared-memory
// slice and thread-block templates are sized for the largest 1D basis.
//
// No-op if operator setup is already done.  Only tensor-product bases are
// supported; CEED_EVAL_DIV / CEED_EVAL_CURL are not implemented.
//------------------------------------------------------------------------------
extern "C" int CeedOperatorBuildKernel_Cuda_gen(CeedOperator op) {
  using std::ostringstream;
  using std::string;

  bool is_setup_done, is_identity_qf;
  struct cudaDeviceProp prop;
  Ceed ceed;
  Ceed_Cuda *ceed_data;
  CeedSize l_size;
  CeedInt Q, P_1d = 0, Q_1d = 0, elem_size, num_input_fields, num_output_fields, num_comp, dim = 1;
  CeedEvalMode eval_mode;
  CeedElemRestriction elem_rstr;
  CeedElemRestriction_Cuda *rstr_data;
  CeedBasis basis;
  CeedBasis_Cuda_shared *basis_data;
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction_Cuda_gen *qf_data;
  CeedQFunction qf;
  CeedOperatorField *op_input_fields, *op_output_fields;
  CeedOperator_Cuda_gen *data;

  // Early exit — the kernel is built once per operator
  CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
  if (is_setup_done) return CEED_ERROR_SUCCESS;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetData(op, &data));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  // Fallback 1D quadrature size; overwritten below if any field has a basis
  Q_1d = Q;
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // TODO: put in a function?
  // Check for restriction only identity operator
  // (identity QFunction with NONE -> NONE eval modes would be pure data
  // movement, which this backend does not implement)
  CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
  if (is_identity_qf) {
    CeedEvalMode eval_mode_in, eval_mode_out;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out));
    CeedCheck(eval_mode_in != CEED_EVAL_NONE || eval_mode_out != CEED_EVAL_NONE, ceed, CEED_ERROR_BACKEND,
              "Backend does not implement restriction only identity operators");
  }

  // Accumulates the full JIT source for the fused kernel
  ostringstream code;

  // TODO: put in a function?
  // Add atomicAdd function for old NVidia architectures
  // (native double-precision atomicAdd requires SM60+, so pre-Pascal devices
  // with a non-FP32 CeedScalar need the CAS-loop fallback header)
  CeedCallBackend(CeedGetData(ceed, &ceed_data));
  CeedCallBackend(cudaGetDeviceProperties(&prop, ceed_data->device_id));
  if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)) {
    char *atomic_add_path, *atomic_add_source;

    CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-atomic-add-fallback.h", &atomic_add_path));
    CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Atomic Add Source -----\n");
    CeedCallBackend(CeedLoadSourceToBuffer(ceed, atomic_add_path, &atomic_add_source));
    code << atomic_add_source;
    CeedCallBackend(CeedFree(&atomic_add_path));
    CeedCallBackend(CeedFree(&atomic_add_source));
  }

  // Load basis source files
  // TODO: generalize to accept different device functions?
  {
    char *tensor_basis_kernel_path, *tensor_basis_kernel_source;

    CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h", &tensor_basis_kernel_path));
    CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Tensor Basis Kernel Source -----\n");
    CeedCallBackend(CeedLoadSourceToBuffer(ceed, tensor_basis_kernel_path, &tensor_basis_kernel_source));
    code << tensor_basis_kernel_source;
    CeedCallBackend(CeedFree(&tensor_basis_kernel_path));
    CeedCallBackend(CeedFree(&tensor_basis_kernel_source));
  }
  {
    char *cuda_gen_template_path, *cuda_gen_template_source;

    CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-gen-templates.h", &cuda_gen_template_path));
    CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Cuda-Gen Template Source -----\n");
    CeedCallBackend(CeedLoadSourceToBuffer(ceed, cuda_gen_template_path, &cuda_gen_template_source));
    code << cuda_gen_template_source;
    CeedCallBackend(CeedFree(&cuda_gen_template_path));
    CeedCallBackend(CeedFree(&cuda_gen_template_source));
  }

  // Get QFunction source and name
  string q_function_source(qf_data->q_function_source);
  string q_function_name(qf_data->q_function_name);
  string operator_name;
  operator_name = "CeedKernelCudaGenOperator_" + q_function_name;

  // Find dim, P_1d, Q_1d
  // Scan input bases; the loop keeps the values from the last field with a
  // basis, and tracks the largest P_1d for thread-block sizing (T_1D below)
  data->max_P_1d = 0;
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_tensor;

      CeedCallBackend(CeedBasisGetData(basis, &basis_data));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));

      // Collect dim, P_1d, and Q_1d
      CeedCallBackend(CeedBasisGetDimension(basis, &dim));
      CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
      CeedCheck(is_tensor, ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
      CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
      CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
      data->max_P_1d = CeedIntMax(data->max_P_1d, P_1d);
    }
  }
  // Check output bases for Q_1d, dim as well
  //   The only input basis might be CEED_BASIS_NONE
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      bool is_tensor;

      CeedCallBackend(CeedBasisGetData(basis, &basis_data));
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));

      // Collect Q_1d
      CeedCallBackend(CeedBasisGetDimension(basis, &dim));
      CeedCallBackend(CeedBasisIsTensor(basis, &is_tensor));
      CeedCheck(is_tensor, ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis");
      CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
    }
  }
  data->dim = dim;
  data->Q_1d = Q_1d;

  // Only use 3D collocated gradient parallelization strategy when gradient is computed
  // The flag stays true only if *every* CEED_EVAL_GRAD field (input and
  // output) provides a collocated gradient matrix d_collo_grad_1d
  // TODO: put in a function?
  bool use_collograd_parallelization = false;

  if (dim == 3) {
    bool was_grad_found = false;

    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        use_collograd_parallelization = basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
        was_grad_found = true;
      }
    }
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      if (eval_mode == CEED_EVAL_GRAD) {
        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        use_collograd_parallelization = basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true);
        was_grad_found = true;
      }
    }
  }

  // Define CEED_Q_VLA
  // The QFunction is called per-point (Q = 1) except in the full-element 3D
  // path, where it processes Q_1d points at once (see the Apply call below)
  code << "\n#undef CEED_Q_VLA\n";
  if (dim != 3 || use_collograd_parallelization) {
    code << "#define CEED_Q_VLA 1\n\n";
  } else {
    code << "#define CEED_Q_VLA " << Q_1d << "\n\n";
  }

  code << q_function_source;

  // Setup
  // Emit the kernel signature; field device pointers arrive via the
  // Fields_Cuda / FieldsInt_Cuda argument structs
  code << "\n// -----------------------------------------------------------------------------\n";
  code << "\nextern \"C\" __global__ void " << operator_name
       << "(CeedInt num_elem, void* ctx, FieldsInt_Cuda indices, Fields_Cuda fields, Fields_Cuda B, Fields_Cuda G, CeedScalar* W) {\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode != CEED_EVAL_WEIGHT) {  // Skip CEED_EVAL_WEIGHT
      code << "  const CeedScalar* d_u_" << i << " = fields.inputs[" << i << "];\n";
    }
  }

  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "  CeedScalar* d_v_" << i << " = fields.outputs[" << i << "];\n";
  }

  code << "  const CeedInt dim = " << dim << ";\n";
  code << "  const CeedInt Q_1d = " << Q_1d << ";\n";

  // Dynamic shared memory: one T_1D (x T_1D in 2D/3D) slice per z-thread
  code << "  extern __shared__ CeedScalar slice[];\n";
  // TODO put in a function? InitSharedData_Cuda?
  code << "  SharedData_Cuda data;\n";
  code << "  data.t_id_x = threadIdx.x;\n";
  code << "  data.t_id_y = threadIdx.y;\n";
  code << "  data.t_id_z = threadIdx.z;\n";
  code << "  data.t_id = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
  code << "  data.slice = slice+data.t_id_z*T_1D" << (dim > 1 ? "*T_1D" : "") << ";\n";

  code << "\n  // -- Input field constants and basis data --\n";
  // TODO: Put in a function?
  // Initialize constants, and matrices B and G
  for (CeedInt i = 0; i < num_input_fields; i++) {
    code << "  // ---- Input field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));

    // Set field constants
    // With no basis, nodes coincide with quadrature points, so P = Q
    if (eval_mode != CEED_EVAL_WEIGHT) {
      CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
      if (basis != CEED_BASIS_NONE) {
        CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
        code << "  const CeedInt P_in_" << i << " = " << P_1d << ";\n";
      } else {
        code << "  const CeedInt P_in_" << i << " = " << Q_1d << ";\n";
      }
      code << "  const CeedInt num_comp_in_" << i << " = " << num_comp << ";\n";
    }

    // Load basis data
    // Capture the device pointer host-side (data->B/G) and emit the matching
    // cooperative load into __shared__ memory (loadMatrix is defined in
    // cuda-gen-templates.h, concatenated above)
    code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        break;
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        data->B.inputs[i] = basis_data->d_interp_1d;
        code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
        code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        data->B.inputs[i] = basis_data->d_interp_1d;
        code << "  __shared__ CeedScalar s_B_in_" << i << "[" << P_1d * Q_1d << "];\n";
        code << "  loadMatrix<P_in_" << i << ",Q_1d>(data, B.inputs[" << i << "], s_B_in_" << i << ");\n";
        if (use_collograd_parallelization) {
          // Collocated gradient acts quadrature-point to quadrature-point: Q x Q
          data->G.inputs[i] = basis_data->d_collo_grad_1d;
          code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * Q_1d << "];\n";
          code << "  loadMatrix<Q_1d,Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i << ");\n";
        } else {
          // Prefer the collocated gradient matrix when the basis provides one
          bool has_collo_grad = basis_data->d_collo_grad_1d;
          data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
          code << "  __shared__ CeedScalar s_G_in_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
          code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_in_" + std::to_string(i))) << ",Q_1d>(data, G.inputs[" << i << "], s_G_in_" << i
               << ");\n";
        }
        break;
      case CEED_EVAL_WEIGHT:
        break;  // No action
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }

  code << "\n  // -- Output field constants and basis data --\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "  // ---- Output field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));

    // Set field constants (P = Q when there is no basis, as for inputs)
    CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
    if (basis != CEED_BASIS_NONE) {
      CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
      code << "  const CeedInt P_out_" << i << " = " << P_1d << ";\n";
    } else {
      code << "  const CeedInt P_out_" << i << " = " << Q_1d << ";\n";
    }
    code << "  const CeedInt num_comp_out_" << i << " = " << num_comp << ";\n";

    // Load basis data (mirrors the input-field logic above)
    code << "  // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        break;  // No action
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        data->B.outputs[i] = basis_data->d_interp_1d;
        code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
        code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        data->B.outputs[i] = basis_data->d_interp_1d;
        code << "  __shared__ CeedScalar s_B_out_" << i << "[" << P_1d * Q_1d << "];\n";
        code << "  loadMatrix<P_out_" << i << ",Q_1d>(data, B.outputs[" << i << "], s_B_out_" << i << ");\n";
        if (use_collograd_parallelization) {
          data->G.outputs[i] = basis_data->d_collo_grad_1d;
          code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * Q_1d << "];\n";
          code << "  loadMatrix<Q_1d,Q_1d>(data, G.outputs[" << i << "], s_G_out_" << i << ");\n";
        } else {
          bool has_collo_grad = basis_data->d_collo_grad_1d;
          data->G.outputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d;
          code << "  __shared__ CeedScalar s_G_out_" << i << "[" << Q_1d * (has_collo_grad ? Q_1d : P_1d) << "];\n";
          code << "  loadMatrix<" << (has_collo_grad ? "Q_1d" : ("P_out_" + std::to_string(i))) << ",Q_1d>(data, G.outputs[" << i << "], s_G_out_"
               << i << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT: {
        Ceed ceed;

        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      }
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL: {
        Ceed ceed;

        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "%s not supported", CeedEvalModes[eval_mode]);
        break;  // Should not occur
      }
      // LCOV_EXCL_STOP
    }
  }
  // Grid-stride loop over elements: blockDim.z elements per block
  code << "\n  // -- Element loop --\n";
  code << "  __syncthreads();\n";
  code << "  for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < num_elem; elem += gridDim.x*blockDim.z) {\n";
  // Input basis apply if needed
  // Generate the correct eval mode code for each input
  code << "    // -- Input field restrictions and basis actions --\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    code << "    // ---- Input field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));

    // TODO: put in a function?
    // Restriction: gather this element's DoFs into registers r_u_i.
    // Skipped for WEIGHT (no L-vector) and for NONE under collograd
    // parallelization (read per quadrature slice inside the q loop instead)
    if (eval_mode != CEED_EVAL_WEIGHT && !((eval_mode == CEED_EVAL_NONE) && use_collograd_parallelization)) {
      code << "    CeedScalar r_u_" << i << "[num_comp_in_" << i << "*P_in_" << i << "];\n";

      bool is_strided;

      CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
      if (!is_strided) {
        CeedInt comp_stride;

        CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
        code << "    const CeedInt l_size_in_" << i << " = " << l_size << ";\n";
        CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
        code << "    // CompStride: " << comp_stride << "\n";
        CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
        data->indices.inputs[i] = rstr_data->d_ind;
        code << "    readDofsOffset" << dim << "d<num_comp_in_" << i << ", " << comp_stride << ", P_in_" << i << ">(data, l_size_in_" << i
             << ", elem, indices.inputs[" << i << "], d_u_" << i << ", r_u_" << i << ");\n";
      } else {
        bool has_backend_strides;
        CeedInt num_elem;

        CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
        CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
        // Default backend strides; overwritten by user strides when provided
        CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

        if (!has_backend_strides) {
          CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, &strides));
        }
        code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
        code << "    readDofsStrided" << dim << "d<num_comp_in_" << i << ",P_in_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
             << ">(data, elem, d_u_" << i << ", r_u_" << i << ");\n";
      }
    }

    // TODO: put in a function?
    // Basis action: r_u_i (nodes) -> r_t_i (quadrature points)
    code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        if (!use_collograd_parallelization) {
          // No basis — alias the nodal values directly
          code << "    CeedScalar* r_t_" << i << " = r_u_" << i << ";\n";
        }
        break;
      case CEED_EVAL_INTERP:
        code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
        code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_"
             << i << ", r_t_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        if (use_collograd_parallelization) {
          // Interpolate only; the collocated gradient is applied per q-slice below
          code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*Q_1d];\n";
          code << "    Interp" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_in_" << i << ",P_in_" << i << ",Q_1d>(data, r_u_" << i
               << ", s_B_in_" << i << ", r_t_" << i << ");\n";
        } else {
          CeedInt P_1d;

          CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
          CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
          code << "    CeedScalar r_t_" << i << "[num_comp_in_" << i << "*dim*Q_1d];\n";
          code << "    Grad" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_in_" << i
               << ",P_in_" << i << ",Q_1d>(data, r_u_" << i << ", s_B_in_" << i << ", s_G_in_" << i << ", r_t_" << i << ");\n";
        }
        break;
      case CEED_EVAL_WEIGHT:
        code << "    CeedScalar r_t_" << i << "[Q_1d];\n";
        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisGetData(basis, &basis_data));
        data->W = basis_data->d_q_weight_1d;
        code << "    Weight" << (dim > 1 ? "Tensor" : "") << dim << "d<Q_1d>(data, W, r_t_" << i << ");\n";
        break;  // No action
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }

  // TODO: put in a function + separate collograd logic
  // Q function
  // Declare output accumulators r_tt_i; under collograd the GRAD accumulator
  // is zero-initialized because gradColloTranspose3d accumulates per slice
  code << "\n    // -- Output field setup --\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "\n    // ---- Output field " << i << " ----\n";
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    if (eval_mode == CEED_EVAL_GRAD) {
      if (use_collograd_parallelization) {
        // Accumulator for gradient slices
        code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
        code << "    for (CeedInt i = 0; i < num_comp_out_" << i << "; i++) {\n";
        code << "      for (CeedInt j = 0; j < Q_1d; ++j) {\n";
        code << "        r_tt_" << i << "[j + i*Q_1d] = 0.0;\n";
        code << "      }\n";
        code << "    }\n";
      } else {
        code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*dim*Q_1d];\n";
      }
    }
    if (eval_mode == CEED_EVAL_NONE || eval_mode == CEED_EVAL_INTERP) {
      code << "    CeedScalar r_tt_" << i << "[num_comp_out_" << i << "*Q_1d];\n";
    }
  }
  // We treat quadrature points per slice in 3d to save registers
  if (use_collograd_parallelization) {
    code << "\n    // Note: Using planes of 3D elements\n";
    code << "#pragma unroll\n";
    code << "    for (CeedInt q = 0; q < Q_1d; q++) {\n";
    code << "      // -- Input fields --\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      code << "      // ---- Input field " << i << " ----\n";
      // Get elem_size, eval_mode, num_comp
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
      // Basis action: gather this q-slice's values into r_q_i
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          bool is_strided;

          code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";

          // NONE fields were not gathered before the q loop — read the
          // current slice straight from the L-vector here
          CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr));
          CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
          if (!is_strided) {
            CeedInt comp_stride;

            CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
            code << "      const CeedInt l_size_in_" << i << " = " << l_size << ";\n";
            CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
            code << "      // CompStride: " << comp_stride << "\n";
            CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
            data->indices.inputs[i] = rstr_data->d_ind;
            code << "      readSliceQuadsOffset"
                 << "3d<num_comp_in_" << i << ", " << comp_stride << ", Q_1d>(data, l_size_in_" << i << ", elem, q, indices.inputs[" << i << "], d_u_"
                 << i << ", r_q_" << i << ");\n";
          } else {
            bool has_backend_strides;
            CeedInt num_elem;

            CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
            CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
            CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
            CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

            if (!has_backend_strides) {
              CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, &strides));
            }
            code << "      // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
            code << "      readSliceQuadsStrided"
                 << "3d<num_comp_in_" << i
                 << ",Q_1d"
                    ","
                 << strides[0] << "," << strides[1] << "," << strides[2] << ">(data, elem, q, d_u_" << i << ", r_q_" << i << ");\n";
          }
          break;
        case CEED_EVAL_INTERP:
          // Copy slice q out of the interpolated values
          code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "];\n";
          code << "      for (CeedInt j = 0; j < num_comp_in_" << i << " ; ++j) {\n";
          code << "        r_q_" << i << "[j] = r_t_" << i << "[q + j*Q_1d];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          // Apply the collocated gradient at slice q
          code << "      CeedScalar r_q_" << i << "[num_comp_in_" << i << "*dim];\n";
          code << "      gradCollo3d<num_comp_in_" << i << ",Q_1d>(data, q, r_t_" << i << ", s_G_in_" << i << ", r_q_" << i << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          code << "      CeedScalar r_q_" << i << "[1];\n";
          code << "      r_q_" << i << "[0] = r_t_" << i << "[q];\n";
          break;  // No action
        case CEED_EVAL_DIV:
          break;  // TODO: Not implemented
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
      }
    }
    code << "\n      // -- Output fields --\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      code << "      // ---- Output field " << i << " ----\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action: per-slice output buffers r_qq_i
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
          break;  // No action
        case CEED_EVAL_INTERP:
          code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "];\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      CeedScalar r_qq_" << i << "[num_comp_out_" << i << "*dim];\n";
          break;
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
          break;  // TODO: Not implemented
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
      }
    }
  } else {
    // Full-element path: the QFunction buffers alias the basis buffers directly
    code << "\n    // Note: Using full elements\n";
    code << "    // -- Input fields --\n";
    for (CeedInt i = 0; i < num_input_fields; i++) {
      code << "    // ---- Input field " << i << " ----\n";
      code << "    CeedScalar* r_q_" << i << " = r_t_" << i << ";\n";
    }
    code << "    // -- Output fields --\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      code << "    // ---- Output field " << i << " ----\n";
      code << "    CeedScalar* r_qq_" << i << " = r_tt_" << i << ";\n";
    }
  }
  code << "\n    // -- QFunction Inputs and outputs --\n";
  code << "    CeedScalar* in[" << num_input_fields << "];\n";
  for (CeedInt i = 0; i < num_input_fields; i++) {
    code << "    // ---- Input field " << i << " ----\n";
    code << "    in[" << i << "] = r_q_" << i << ";\n";
  }
  code << "    CeedScalar* out[" << num_output_fields << "];\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "    // ---- Output field " << i << " ----\n";
    code << "    out[" << i << "] = r_qq_" << i << ";\n";
  }
  code << "\n    // -- Apply QFunction --\n";
  // Q argument matches the CEED_Q_VLA choice above: 1 point per call, except
  // Q_1d points per call on the full-element 3D path
  code << "    " << q_function_name << "(ctx, ";
  if (dim != 3 || use_collograd_parallelization) {
    code << "1";
  } else {
    code << "Q_1d";
  }
  code << ", in, out);\n";
  if (use_collograd_parallelization) {
    // Scatter QFunction slice outputs back into the r_tt_i accumulators,
    // then close the q loop
    code << "      // -- Output fields --\n";
    for (CeedInt i = 0; i < num_output_fields; i++) {
      code << "      // ---- Output field " << i << " ----\n";
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
      // Basis action
      code << "      // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
      switch (eval_mode) {
        case CEED_EVAL_NONE:
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;  // No action
        case CEED_EVAL_INTERP:
          code << "      for (CeedInt j = 0; j < num_comp_out_" << i << " ; ++j) {\n";
          code << "        r_tt_" << i << "[q + j*Q_1d] = r_qq_" << i << "[j];\n";
          code << "      }\n";
          break;
        case CEED_EVAL_GRAD:
          code << "      gradColloTranspose3d<num_comp_out_" << i << ",Q_1d>(data, q, r_qq_" << i << ", s_G_out_" << i << ", r_tt_" << i << ");\n";
          break;
        case CEED_EVAL_WEIGHT:
          break;  // Should not occur
        case CEED_EVAL_DIV:
          break;  // TODO: Not implemented
        case CEED_EVAL_CURL:
          break;  // TODO: Not implemented
      }
    }
    code << "    }\n";
  }

  // Output basis apply if needed
  // Generate the correct eval mode code for each output
  code << "\n    // -- Output field basis action and restrictions --\n";
  for (CeedInt i = 0; i < num_output_fields; i++) {
    code << "    // ---- Output field " << i << " ----\n";
    // Get elem_size, eval_mode, num_comp
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    CeedCallBackend(CeedElemRestrictionGetNumComponents(elem_rstr, &num_comp));
    // TODO put in a function
    // Basis action (transpose): r_tt_i (quadrature points) -> r_v_i (nodes)
    code << "    // EvalMode: " << CeedEvalModes[eval_mode] << "\n";
    switch (eval_mode) {
      case CEED_EVAL_NONE:
        code << "    CeedScalar* r_v_" << i << " = r_tt_" << i << ";\n";
        break;  // No action
      case CEED_EVAL_INTERP:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
             << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        break;
      case CEED_EVAL_GRAD:
        code << "    CeedScalar r_v_" << i << "[num_comp_out_" << i << "*P_out_" << i << "];\n";
        if (use_collograd_parallelization) {
          // Gradient already accumulated per slice; only interpolate back
          code << "    InterpTranspose" << (dim > 1 ? "Tensor" : "") << dim << "d<num_comp_out_" << i << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i
               << ", s_B_out_" << i << ", r_v_" << i << ");\n";
        } else {
          CeedInt P_1d;
          CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
          CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
          code << "    GradTranspose" << (dim > 1 ? "Tensor" : "") << (dim == 3 && Q_1d >= P_1d ? "Collocated" : "") << dim << "d<num_comp_out_" << i
               << ",P_out_" << i << ",Q_1d>(data, r_tt_" << i << ", s_B_out_" << i << ", s_G_out_" << i << ", r_v_" << i << ");\n";
        }
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT: {
        Ceed ceed;

        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      }
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL: {
        Ceed ceed;

        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "%s not supported", CeedEvalModes[eval_mode]);
        break;  // Should not occur
      }
      // LCOV_EXCL_STOP
    }
    // TODO put in a function
    // Restriction (transpose): scatter r_v_i back into the output L-vector
    bool is_strided;
    CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided));
    if (!is_strided) {
      CeedInt comp_stride;

      CeedCallBackend(CeedElemRestrictionGetLVectorSize(elem_rstr, &l_size));
      code << "    const CeedInt l_size_out_" << i << " = " << l_size << ";\n";
      CeedCallBackend(CeedElemRestrictionGetCompStride(elem_rstr, &comp_stride));
      code << "    // CompStride: " << comp_stride << "\n";
      CeedCallBackend(CeedElemRestrictionGetData(elem_rstr, &rstr_data));
      data->indices.outputs[i] = rstr_data->d_ind;
      code << "    writeDofsOffset" << dim << "d<num_comp_out_" << i << ", " << comp_stride << ", P_out_" << i << ">(data, l_size_out_" << i
           << ", elem, indices.outputs[" << i << "], r_v_" << i << ", d_v_" << i << ");\n";
    } else {
      bool has_backend_strides;
      CeedInt num_elem;

      CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &has_backend_strides));
      CeedCallBackend(CeedElemRestrictionGetNumElements(elem_rstr, &num_elem));
      CeedInt strides[3] = {1, elem_size * num_elem, elem_size};

      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(elem_rstr, &strides));
      }
      code << "    // Strides: {" << strides[0] << ", " << strides[1] << ", " << strides[2] << "}\n";
      code << "    writeDofsStrided" << dim << "d<num_comp_out_" << i << ",P_out_" << i << "," << strides[0] << "," << strides[1] << "," << strides[2]
           << ">(data, elem, r_v_" << i << ", d_v_" << i << ");\n";
    }
  }

  // Close the element loop and the kernel body
  code << "  }\n";
  code << "}\n";
  code << "// -----------------------------------------------------------------------------\n\n";

  // View kernel for debugging
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "Generated Operator Kernels:\n");
  CeedDebug(ceed, code.str().c_str());

  // JIT-compile the assembled source; T_1D sizes the shared-memory slice for
  // the largest 1D basis dimension used by any field
  CeedCallBackend(CeedCompile_Cuda(ceed, code.str().c_str(), &data->module, 1, "T_1D", CeedIntMax(Q_1d, data->max_P_1d)));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, operator_name.c_str(), &data->op));

  CeedCallBackend(CeedOperatorSetSetupDone(op));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------