// Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include "ceed-cuda-compile.h"

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda_runtime.h>
#include <nvrtc.h>
#include <stdarg.h>
#include <string.h>

#include <sstream>

#include "ceed-cuda-common.h"

// Convert an NVRTC status code into a CeedError on failure
#define CeedChk_Nvrtc(ceed, x) \
  do { \
    nvrtcResult result = static_cast<nvrtcResult>(x); \
    if (result != NVRTC_SUCCESS) return CeedError((ceed), CEED_ERROR_BACKEND, nvrtcGetErrorString(result)); \
  } while (0)

// Call an NVRTC function and check its return status
#define CeedCallNvrtc(ceed, ...) \
  do { \
    int ierr_q_ = __VA_ARGS__; \
    CeedChk_Nvrtc(ceed, ierr_q_); \
  } while (0)

//------------------------------------------------------------------------------
// Compile CUDA kernel
//
// JiT-compiles the given CUDA source with NVRTC and loads the result into a
// CUmodule.
//
// @param[in]  source          Null-terminated CUDA source to compile; appended
//                             after the generated #define lines and the
//                             standard libCEED JiT header include
// @param[in]  throw_error     If true, a compile failure returns a CeedError
//                             with the NVRTC log; if false, the failure is
//                             only debug-logged and reported via is_compile_good
// @param[out] is_compile_good Set to true iff nvrtcCompileProgram succeeded
// @param[out] module          Loaded CUDA module on success
// @param[in]  num_defines     Number of (name, value) pairs in args
// @param[in]  args            va_list consumed as num_defines pairs of
//                             (char *name, int value), emitted as #define lines
//------------------------------------------------------------------------------
static int CeedCompileCore_Cuda(Ceed ceed, const char *source, const bool throw_error, bool *is_compile_good, CUmodule *module,
                                const CeedInt num_defines, va_list args) {
  size_t                ptx_size;
  char                 *ptx;
  const int             num_opts = 4;  // number of fixed compiler options set below
  CeedInt               num_jit_source_dirs = 0, num_jit_defines = 0;
  const char          **opts;
  nvrtcProgram          prog;
  struct cudaDeviceProp prop;
  Ceed_Cuda            *ceed_data;

  cudaFree(0);  // Make sure a Context exists for nvrtc

  std::ostringstream code;

  // Get kernel specific options, such as kernel constants
  if (num_defines > 0) {
    char *name;
    int   val;

    for (int i = 0; i < num_defines; i++) {
      name = va_arg(args, char *);
      val  = va_arg(args, int);
      code << "#define " << name << " " << val << "\n";
    }
  }

  // Standard libCEED definitions for CUDA backends
  code << "#include <ceed/jit-source/cuda/cuda-jit.h>\n\n";

  // Non-macro options
  CeedCallBackend(CeedCalloc(num_opts, &opts));
  opts[0] = "-default-device";
  CeedCallBackend(CeedGetData(ceed, &ceed_data));
  CeedCallCuda(ceed, cudaGetDeviceProperties(&prop, ceed_data->device_id));
  std::string arch_arg =
#if CUDA_VERSION >= 11010
      // NVRTC used to support only virtual architectures through the option
      // -arch, since it was only emitting PTX. It will now support actual
      // architectures as well to emit SASS.
      // https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#dynamic-code-generation
      "-arch=sm_"
#else
      "-arch=compute_"
#endif
      + std::to_string(prop.major) + std::to_string(prop.minor);
  // NOTE: opts[1] points into arch_arg, which stays alive until this function returns
  opts[1] = arch_arg.c_str();
  opts[2] = "-Dint32_t=int";
  opts[3] = "-DCEED_RUNNING_JIT_PASS=1";
  // Additional include dirs
  {
    const char **jit_source_dirs;

    CeedCallBackend(CeedGetJitSourceRoots(ceed, &num_jit_source_dirs, &jit_source_dirs));
    CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs, &opts));
    for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
      std::ostringstream include_dir_arg;

      include_dir_arg << "-I" << jit_source_dirs[i];
      CeedCallBackend(CeedStringAllocCopy(include_dir_arg.str().c_str(), (char **)&opts[num_opts + i]));
    }
    CeedCallBackend(CeedRestoreJitSourceRoots(ceed, &jit_source_dirs));
  }
  // User defines
  {
    const char **jit_defines;

    CeedCallBackend(CeedGetJitDefines(ceed, &num_jit_defines, &jit_defines));
    CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs + num_jit_defines, &opts));
    for (CeedInt i = 0; i < num_jit_defines; i++) {
      std::ostringstream define_arg;

      define_arg << "-D" << jit_defines[i];
      CeedCallBackend(CeedStringAllocCopy(define_arg.str().c_str(), (char **)&opts[num_opts + num_jit_source_dirs + i]));
    }
    CeedCallBackend(CeedRestoreJitDefines(ceed, &jit_defines));
  }

  // Add string source argument provided in call
  code << source;

  // Create Program
  CeedCallNvrtc(ceed, nvrtcCreateProgram(&prog, code.str().c_str(), NULL, 0, NULL, NULL));

  // Compile kernel
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- ATTEMPTING TO COMPILE JIT SOURCE ----------\n");
  CeedDebug(ceed, "Source:\n%s\n", code.str().c_str());
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JIT SOURCE ----------\n");
  if (CeedDebugFlag(ceed)) {
    // LCOV_EXCL_START
    CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- JiT COMPILER OPTIONS ----------\n");
    for (CeedInt i = 0; i < num_opts + num_jit_source_dirs + num_jit_defines; i++) {
      CeedDebug(ceed, "Option %d: %s", i, opts[i]);
    }
    CeedDebug(ceed, "");
    CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JiT COMPILER OPTIONS ----------\n");
    // LCOV_EXCL_STOP
  }
  nvrtcResult result = nvrtcCompileProgram(prog, num_opts + num_jit_source_dirs + num_jit_defines, opts);

  // Free the dynamically-allocated option strings (opts[0..3] are literals or arch_arg)
  for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
    CeedCallBackend(CeedFree(&opts[num_opts + i]));
  }
  for (CeedInt i = 0; i < num_jit_defines; i++) {
    CeedCallBackend(CeedFree(&opts[num_opts + num_jit_source_dirs + i]));
  }
  CeedCallBackend(CeedFree(&opts));
  *is_compile_good = result == NVRTC_SUCCESS;
  if (!*is_compile_good) {
    char  *log;
    size_t log_size;

    // Retrieve the NVRTC compile log for the error report
    CeedCallNvrtc(ceed, nvrtcGetProgramLogSize(prog, &log_size));
    CeedCallBackend(CeedMalloc(log_size, &log));
    CeedCallNvrtc(ceed, nvrtcGetProgramLog(prog, log));
    if (throw_error) {
      return CeedError(ceed, CEED_ERROR_BACKEND, "%s\n%s", nvrtcGetErrorString(result), log);
    } else {
      // LCOV_EXCL_START
      CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
      CeedDebug(ceed, "Error: %s\nCompile log:\n%s\n", nvrtcGetErrorString(result), log);
      CeedDebug256(ceed, CEED_DEBUG_COLOR_WARNING, "---------- BACKEND MAY FALLBACK ----------\n");
      CeedCallBackend(CeedFree(&log));
      CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
      return CEED_ERROR_SUCCESS;
      // LCOV_EXCL_STOP
    }
  }

  // Note: with CUDA >= 11.1 the "ptx" buffer actually holds SASS (CUBIN);
  // cuModuleLoadData accepts either form
#if CUDA_VERSION >= 11010
  CeedCallNvrtc(ceed, nvrtcGetCUBINSize(prog, &ptx_size));
  CeedCallBackend(CeedMalloc(ptx_size, &ptx));
  CeedCallNvrtc(ceed, nvrtcGetCUBIN(prog, ptx));
#else
  CeedCallNvrtc(ceed, nvrtcGetPTXSize(prog, &ptx_size));
  CeedCallBackend(CeedMalloc(ptx_size, &ptx));
  CeedCallNvrtc(ceed, nvrtcGetPTX(prog, ptx));
#endif
  CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));

  CeedCallCuda(ceed, cuModuleLoadData(module, ptx));
  CeedCallBackend(CeedFree(&ptx));
  return CEED_ERROR_SUCCESS;
}

// Compile CUDA source into a module; a compile failure is a hard CeedError.
// Variadic arguments are num_defines pairs of (char *name, int value).
int CeedCompile_Cuda(Ceed ceed, const char *source, CUmodule *module, const CeedInt num_defines, ...) {
  bool    is_compile_good = true;
  va_list args;

  va_start(args, num_defines);
  const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, true, &is_compile_good, module, num_defines, args);

  va_end(args);
  CeedCallBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

// Compile CUDA source into a module; a compile failure is reported via
// is_compile_good instead of raising an error, so the caller may fall back.
// Variadic arguments are num_defines pairs of (char *name, int value).
int CeedTryCompile_Cuda(Ceed ceed, const char *source, bool *is_compile_good, CUmodule *module, const CeedInt num_defines, ...) {
  va_list args;

  va_start(args, num_defines);
  const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, false, is_compile_good, module, num_defines, args);

  va_end(args);
  CeedCallBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get CUDA kernel
//------------------------------------------------------------------------------
int CeedGetKernel_Cuda(Ceed ceed, CUmodule module, const char *name, CUfunction *kernel) {
  // Look up the named kernel function in a previously compiled module
  CeedCallCuda(ceed, cuModuleGetFunction(kernel, module, name));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Run CUDA kernel with block size selected automatically based on the kernel
// (which may use enough registers to require a smaller block size than the
// hardware is capable)
//------------------------------------------------------------------------------
int CeedRunKernelAutoblockCuda(Ceed ceed, CUfunction kernel, size_t points, void **args) {
  int min_grid_size, max_block_size;

  // Query the occupancy-optimal block size for this kernel
  // (0 dynamic shared memory; 0x10000 is the block size limit argument - TODO confirm against driver API docs)
  CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, kernel, NULL, 0, 0x10000));
  CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, CeedDivUpInt(points, max_block_size), max_block_size, args));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Run CUDA kernel
//------------------------------------------------------------------------------
int CeedRunKernel_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size, void **args) {
  // 1D launch on the NULL stream with no dynamic shared memory
  CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size, 1, 1, 0, args));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Run CUDA kernel for
spatial dimension 236 //------------------------------------------------------------------------------ 237 int CeedRunKernelDim_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y, const int block_size_z, 238 void **args) { 239 CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size_x, block_size_y, block_size_z, 0, args)); 240 return CEED_ERROR_SUCCESS; 241 } 242 243 //------------------------------------------------------------------------------ 244 // Run CUDA kernel for spatial dimension with shared memory 245 //------------------------------------------------------------------------------ 246 static int CeedRunKernelDimSharedCore_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, 247 const int block_size_y, const int block_size_z, const int shared_mem_size, const bool throw_error, 248 bool *is_good_run, void **args) { 249 #if CUDA_VERSION >= 9000 250 cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_mem_size); 251 #endif 252 CUresult result = cuLaunchKernel(kernel, grid_size, 1, 1, block_size_x, block_size_y, block_size_z, shared_mem_size, stream, args, NULL); 253 254 if (result == CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES) { 255 int max_threads_per_block, shared_size_bytes, num_regs; 256 257 cuFuncGetAttribute(&max_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel); 258 cuFuncGetAttribute(&shared_size_bytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel); 259 cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, kernel); 260 if (throw_error) { 261 return CeedError(ceed, CEED_ERROR_BACKEND, 262 "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d", 263 max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs); 264 } else { 265 // LCOV_EXCL_START 266 CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, 
"---------- LAUNCH ERROR DETECTED ----------\n"); 267 CeedDebug(ceed, "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d\n", 268 max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs); 269 CeedDebug256(ceed, CEED_DEBUG_COLOR_WARNING, "---------- BACKEND MAY FALLBACK ----------\n"); 270 // LCOV_EXCL_STOP 271 } 272 *is_good_run = false; 273 } else CeedChk_Cu(ceed, result); 274 return CEED_ERROR_SUCCESS; 275 } 276 277 int CeedRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y, 278 const int block_size_z, const int shared_mem_size, void **args) { 279 bool is_good_run = true; 280 281 CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, true, 282 &is_good_run, args)); 283 return CEED_ERROR_SUCCESS; 284 } 285 286 int CeedTryRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y, 287 const int block_size_z, const int shared_mem_size, bool *is_good_run, void **args) { 288 CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, false, 289 is_good_run, args)); 290 return CEED_ERROR_SUCCESS; 291 } 292 293 //------------------------------------------------------------------------------ 294