// NOTE(review): This file's text appears corrupted by a line-collapsing
// extraction: the seven `#include` directives below lost their <header>
// targets, original newlines were removed so each inline `//` comment now
// lexically swallows the code that followed it on later original lines, and
// many `code <<` stream-insertion chains lost tokens between `<` characters
// (e.g. `"<B.inputs[i]`). The code is preserved byte-for-byte below; only
// comments are added. Recover the pristine file from upstream before
// attempting any functional change.
//
// CeedCudaGenOperatorBuild: assembles the CUDA C source for one fused
// operator kernel (named "CeedKernelCudaGenOperator_" + QFunction name) into
// an in-memory string, JIT-compiles it into data->module, and caches the
// kernel handle in data->op. Returns CEED_ERROR_SUCCESS, or a
// CEED_ERROR_BACKEND error for unsupported configurations (restriction-only
// identity operators; non-tensor bases). No-op when operator setup is
// already done (checked via CeedOperatorIsSetupDone at the top).
// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors. // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. // // SPDX-License-Identifier: BSD-2-Clause // // This file is part of CEED: http://github.com/ceed #define CEED_DEBUG_COLOR 12 #include #include #include #include #include #include #include "ceed-cuda-gen.h" #include "../cuda/ceed-cuda-compile.h" #include "../cuda-ref/ceed-cuda-ref.h" #include "../cuda-shared/ceed-cuda-shared.h" //------------------------------------------------------------------------------ // Build single operator kernel //------------------------------------------------------------------------------ extern "C" int CeedCudaGenOperatorBuild(CeedOperator op) { using std::ostringstream; using std::string; int ierr; bool is_setup_done; ierr = CeedOperatorIsSetupDone(op, &is_setup_done); CeedChkBackend(ierr); if (is_setup_done) return CEED_ERROR_SUCCESS; Ceed ceed; ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); CeedOperator_Cuda_gen *data; ierr = CeedOperatorGetData(op, &data); CeedChkBackend(ierr); CeedQFunction qf; CeedQFunction_Cuda_gen *qf_data; ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr); ierr = CeedQFunctionGetData(qf, &qf_data); CeedChkBackend(ierr); CeedSize lsize; CeedInt Q, P_1d = 0, Q_1d = 0, elem_size, num_input_fields, num_output_fields, num_comp, dim = 1; ierr = CeedOperatorGetNumQuadraturePoints(op, &Q); CeedChkBackend(ierr); Q_1d = Q; CeedOperatorField *op_input_fields, *op_output_fields; ierr = CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields); CeedChkBackend(ierr); CeedQFunctionField *qf_input_fields, *qf_output_fields; ierr = CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields); CeedChkBackend(ierr); CeedEvalMode eval_mode; CeedBasis basis; CeedBasis_Cuda_shared *basis_data; CeedElemRestriction Erestrict; CeedElemRestriction_Cuda *restr_data; // TODO: 
// NOTE(review): the tokens beginning the next line are the severed
// continuation of the `// TODO:` comment above, not executable code.
// The section below rejects restriction-only identity operators
// (CEED_EVAL_NONE on both the input and output field).
put in a function? // Check for restriction only identity operator bool is_identity_qf; ierr = CeedQFunctionIsIdentity(qf, &is_identity_qf); CeedChkBackend(ierr); if (is_identity_qf) { CeedEvalMode eval_mode_in, eval_mode_out; ierr = CeedQFunctionFieldGetEvalMode(qf_input_fields[0], &eval_mode_in); CeedChkBackend(ierr); ierr = CeedQFunctionFieldGetEvalMode(qf_output_fields[0], &eval_mode_out); CeedChkBackend(ierr); if (eval_mode_in == CEED_EVAL_NONE && eval_mode_out == CEED_EVAL_NONE) // LCOV_EXCL_START return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement restriction only identity operators"); // LCOV_EXCL_STOP } ostringstream code; // TODO: put in a function? // Add atomicAdd function for old NVidia architectures struct cudaDeviceProp prop; Ceed_Cuda *ceed_data; ierr = CeedGetData(ceed, &ceed_data); CeedChkBackend(ierr); CeedChkBackend(ierr); ierr = cudaGetDeviceProperties(&prop, ceed_data->device_id); CeedChkBackend(ierr); if ((prop.major < 6) && (CEED_SCALAR_TYPE != CEED_SCALAR_FP32)){ char *atomic_add_path, *atomic_add_source; ierr = CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-atomic-add-fallback.h", &atomic_add_path); CeedChkBackend(ierr); CeedDebug256(ceed, 2, "----- Loading Atomic Add Source -----\n"); ierr = CeedLoadSourceToBuffer(ceed, atomic_add_path, &atomic_add_source); CeedChkBackend(ierr); code << atomic_add_source; ierr = CeedFree(&atomic_add_path); CeedChkBackend(ierr); ierr = CeedFree(&atomic_add_source); CeedChkBackend(ierr); } // Load basis source files // TODO: generalize to accept different device functions? 
// NOTE(review): on the line above, `CeedChkBackend(ierr);` is duplicated
// after CeedGetData — the second check re-tests the same ierr with no call
// in between and is redundant. Also note the (prop.major < 6) branch: it
// prepends an atomicAdd fallback because native double-precision atomicAdd
// requires compute capability 6.0+ (SM60), so pre-Pascal devices need a
// CAS-based software fallback when CEED_SCALAR_TYPE is not FP32.
// The next line loads the tensor-basis templates and cuda-gen templates
// JIT sources into the kernel string, then scans the input fields for
// dim / P_1d / Q_1d.
{ char *tensor_basis_kernel_path, *tensor_basis_kernel_source; ierr = CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-shared-basis-tensor-templates.h", &tensor_basis_kernel_path); CeedChkBackend(ierr); CeedDebug256(ceed, 2, "----- Loading Tensor Basis Kernel Source -----\n"); ierr = CeedLoadSourceToBuffer(ceed, tensor_basis_kernel_path, &tensor_basis_kernel_source); CeedChkBackend(ierr); code << tensor_basis_kernel_source; ierr = CeedFree(&tensor_basis_kernel_path); CeedChkBackend(ierr); ierr = CeedFree(&tensor_basis_kernel_source); CeedChkBackend(ierr); } { char *cuda_gen_template_path, *cuda_gen_template_source; ierr = CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-gen-templates.h", &cuda_gen_template_path); CeedChkBackend(ierr); CeedDebug256(ceed, 2, "----- Loading Cuda-Gen Template Source -----\n"); ierr = CeedLoadSourceToBuffer(ceed, cuda_gen_template_path, &cuda_gen_template_source); CeedChkBackend(ierr); code << cuda_gen_template_source; ierr = CeedFree(&cuda_gen_template_path); CeedChkBackend(ierr); ierr = CeedFree(&cuda_gen_template_source); CeedChkBackend(ierr); } // Get QFunction source and name string q_function_source(qf_data->q_function_source); string q_function_name(qf_data->q_function_name); string operator_name; operator_name = "CeedKernelCudaGenOperator_" + q_function_name; // Find dim, P_1d, Q_1d data->max_P_1d = 0; for (CeedInt i = 0; i < num_input_fields; i++) { ierr = CeedOperatorFieldGetBasis(op_input_fields[i], &basis); CeedChkBackend(ierr); if (basis != CEED_BASIS_COLLOCATED) { ierr = CeedBasisGetData(basis, &basis_data); CeedChkBackend(ierr); ierr = CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode); CeedChkBackend(ierr); // Collect dim, P_1d, and Q_1d ierr = CeedBasisGetDimension(basis, &dim); CeedChkBackend(ierr); bool isTensor; ierr = CeedBasisIsTensor(basis, &isTensor); CeedChkBackend(ierr); if (isTensor) { ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d); CeedChkBackend(ierr); ierr = 
// NOTE(review): the next line is the severed continuation of the statement
// above: it gathers P_1d per tensor input basis and tracks data->max_P_1d,
// then repeats the scan over output fields for Q_1d/dim (the only input
// basis might be CEED_BASIS_COLLOCATED). Non-tensor bases are rejected
// with CEED_ERROR_BACKEND in both loops.
CeedBasisGetNumNodes1D(basis, &P_1d); CeedChkBackend(ierr); if (P_1d > data->max_P_1d) data->max_P_1d = P_1d; } else { // LCOV_EXCL_START return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis"); // LCOV_EXCL_STOP } } } // Check output bases for Q_1d, dim as well // The only input basis might be CEED_BASIS_COLLOCATED for (CeedInt i = 0; i < num_output_fields; i++) { ierr = CeedOperatorFieldGetBasis(op_output_fields[i], &basis); CeedChkBackend(ierr); if (basis != CEED_BASIS_COLLOCATED) { ierr = CeedBasisGetData(basis, &basis_data); CeedChkBackend(ierr); ierr = CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode); CeedChkBackend(ierr); // Collect Q_1d ierr = CeedBasisGetDimension(basis, &dim); CeedChkBackend(ierr); bool isTensor; ierr = CeedBasisIsTensor(basis, &isTensor); CeedChkBackend(ierr); if (isTensor) { ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d); CeedChkBackend(ierr); } else { // LCOV_EXCL_START return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement operators with non-tensor basis"); // LCOV_EXCL_STOP } } } data->dim = dim; data->Q_1d = Q_1d; // Only use 3D collocated gradient parallelization strategy when gradient is computed // TODO: put in a function? bool use_collograd_parallelization = false; if (dim == 3) { bool was_grad_found = false; for (CeedInt i = 0; i < num_input_fields; i++) { ierr = CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode); if (eval_mode == CEED_EVAL_GRAD) { ierr = CeedOperatorFieldGetBasis(op_input_fields[i], &basis); CeedChkBackend(ierr); ierr = CeedBasisGetData(basis, &basis_data); CeedChkBackend(ierr); use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? 
// NOTE(review): the next line continues the 3D collocated-gradient
// detection over input then output fields: use_collograd_parallelization
// ends up true only when every CEED_EVAL_GRAD basis provides a
// d_collo_grad_1d matrix. Also note: the `ierr =
// CeedQFunctionFieldGetEvalMode(...)` calls in these two loops are NOT
// followed by CeedChkBackend(ierr), unlike every other call in this
// function — presumably an oversight; confirm against the upstream source.
// After this, the (heavily garbled) section emits the kernel body: CEED_Q_VLA
// definition, shared-memory B/G basis matrices, restriction reads, basis
// applies, the QFunction call, and output writes.
use_collograd_parallelization : true); was_grad_found = true; } } for (CeedInt i = 0; i < num_output_fields; i++) { ierr = CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode); if (eval_mode == CEED_EVAL_GRAD) { ierr = CeedOperatorFieldGetBasis(op_output_fields[i], &basis); CeedChkBackend(ierr); ierr = CeedBasisGetData(basis, &basis_data); CeedChkBackend(ierr); use_collograd_parallelization = !!basis_data->d_collo_grad_1d && (was_grad_found ? use_collograd_parallelization : true); was_grad_found = true; } } } // Define CEED_Q_VLA code << "\n#undef CEED_Q_VLA\n"; if (dim != 3 || use_collograd_parallelization) { code << "#define CEED_Q_VLA 1\n\n"; } else { code << "#define CEED_Q_VLA "<1?"*T_1D":"")<<";\n"; code << "\n // -- Input field constants and basis data --\n"; // TODO: Put in a function? //Initialize constants, and matrices B and G for (CeedInt i = 0; i < num_input_fields; i++) { code << " // ---- Input field "<B.inputs[i] = basis_data->d_interp_1d; code << " __shared__ CeedScalar s_B_in_"<(data, B.inputs["<B.inputs[i] = basis_data->d_interp_1d; code << " __shared__ CeedScalar s_B_in_"<(data, B.inputs["<G.inputs[i] = basis_data->d_collo_grad_1d; code << " __shared__ CeedScalar s_G_in_"<(data, G.inputs["<d_collo_grad_1d; data->G.inputs[i] = has_collo_grad ? basis_data->d_collo_grad_1d : basis_data->d_grad_1d; code << " __shared__ CeedScalar s_G_in_"<(data, G.inputs["<B.outputs[i] = basis_data->d_interp_1d; code << " __shared__ CeedScalar s_B_out_"<(data, B.outputs["<B.outputs[i] = basis_data->d_interp_1d; code << " __shared__ CeedScalar s_B_out_"<(data, B.outputs["<G.outputs[i] = basis_data->d_collo_grad_1d; code << " __shared__ CeedScalar s_G_out_"<(data, G.outputs["<d_collo_grad_1d; data->G.outputs[i] = has_collo_grad ? 
// NOTE(review): the final line (also garbled) continues the code-gen for
// output fields and restriction writes, then JIT-compiles the assembled
// string into data->module with T_1D = CeedIntMax(Q_1d, data->max_P_1d),
// looks up the generated kernel by operator_name into data->op, marks
// operator setup done, and returns CEED_ERROR_SUCCESS.
basis_data->d_collo_grad_1d : basis_data->d_grad_1d; code << " __shared__ CeedScalar s_G_out_"<(data, G.outputs["<indices.inputs[i] = restr_data->d_ind; code << " readDofsOffset"<(data, lsize_in_"<(data, elem, d_u_"<1?"Tensor":"")<(data, r_u_"<1?"Tensor":"")<(data, r_u_"<1?"Tensor":"")<<(dim==3&&Q_1d>=P_1d?"Collocated":"")<(data, r_u_"<W = basis_data->d_q_weight_1d; code << " Weight"<<(dim>1?"Tensor":"")<(data, W, r_t_"<indices.inputs[i] = restr_data->d_ind; code << " readSliceQuadsOffset"<<"3d(data, lsize_in_"<(data, elem, q, d_u_"<(data, q, r_t_"<(data, q, r_qq_"<1?"Tensor":"")<(data, r_tt_"<1?"Tensor":"")<(data, r_tt_"<1?"Tensor":"")<<(dim==3&&Q_1d>=P_1d?"Collocated":"")<(data, r_tt_"<indices.outputs[i] = restr_data->d_ind; code << " writeDofsOffset"<(data, lsize_out_"<(data, elem, r_v_"<module, 1, "T_1D", CeedIntMax(Q_1d, data->max_P_1d)); CeedChkBackend(ierr); ierr = CeedGetKernelCuda(ceed, data->module, operator_name.c_str(), &data->op); CeedChkBackend(ierr); ierr = CeedOperatorSetSetupDone(op); CeedChkBackend(ierr); return CEED_ERROR_SUCCESS; } //------------------------------------------------------------------------------