xref: /libCEED/backends/cuda/ceed-cuda-compile.cpp (revision 9b5f41c81b637db3e5453a22df59a4f47deed499)
1 // Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include "ceed-cuda-compile.h"
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-tools.h>
13 #include <cuda_runtime.h>
14 #include <dirent.h>
15 #include <nvrtc.h>
16 #include <stdarg.h>
17 #include <string.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
26 
27 #include "ceed-cuda-common.h"
28 
29 #define CeedChk_Nvrtc(ceed, x)                                                                              \
30   do {                                                                                                      \
31     nvrtcResult result = static_cast<nvrtcResult>(x);                                                       \
32     if (result != NVRTC_SUCCESS) return CeedError((ceed), CEED_ERROR_BACKEND, nvrtcGetErrorString(result)); \
33   } while (0)
34 
35 #define CeedCallNvrtc(ceed, ...)  \
36   do {                            \
37     int ierr_q_ = __VA_ARGS__;    \
38     CeedChk_Nvrtc(ceed, ierr_q_); \
39   } while (0)
40 
41 #define CeedCallSystem(ceed, command, message) CeedCallBackend(CeedCallSystem_Core(ceed, command, message))
42 
43 //------------------------------------------------------------------------------
44 // Call system command and capture stdout + stderr
45 //------------------------------------------------------------------------------
46 static int CeedCallSystem_Core(Ceed ceed, const char *command, const char *message) {
47   CeedDebug(ceed, "Running command:\n$ %s\n", command);
48   FILE *output_stream = popen((command + std::string(" 2>&1")).c_str(), "r");
49 
50   CeedCheck(output_stream != nullptr, ceed, CEED_ERROR_BACKEND, "Failed to %s with command: %s", message, command);
51 
52   char output[4 * CEED_MAX_RESOURCE_LEN];
53 
54   while (fgets(output, sizeof(output), output_stream) != nullptr) {
55   }
56   CeedDebug(ceed, "Command output:\n%s\n", output);
57 
58   CeedCheck(pclose(output_stream) == 0, ceed, CEED_ERROR_BACKEND, "Failed to %s with command: %s\nand error: %s", message, command, output);
59   return CEED_ERROR_SUCCESS;
60 }
61 
62 //------------------------------------------------------------------------------
63 // Compile CUDA kernel
64 //------------------------------------------------------------------------------
65 using std::ifstream;
66 using std::ofstream;
67 using std::ostringstream;
68 
69 static int CeedCompileCore_Cuda(Ceed ceed, const char *source, const bool throw_error, bool *is_compile_good, CUmodule *module,
70                                 const CeedInt num_defines, va_list args) {
71   size_t                ptx_size;
72   char                 *ptx;
73   const int             num_opts            = 4;
74   CeedInt               num_jit_source_dirs = 0, num_jit_defines = 0;
75   const char          **opts;
76   nvrtcProgram          prog;
77   struct cudaDeviceProp prop;
78   Ceed_Cuda            *ceed_data;
79 
80   cudaFree(0);  // Make sure a Context exists for nvrtc
81 
82   std::ostringstream code;
83   bool               using_clang;
84 
85   CeedCallBackend(CeedGetIsClang(ceed, &using_clang));
86 
87   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS,
88                using_clang ? "Compiling CUDA with Clang backend (with Rust QFunction support)"
89                            : "Compiling CUDA with NVRTC backend (without Rust QFunction support).\nTo use the Clang backend, set the environment "
90                              "variable GPU_CLANG=1");
91 
92   // Get kernel specific options, such as kernel constants
93   if (num_defines > 0) {
94     char *name;
95     int   val;
96 
97     for (int i = 0; i < num_defines; i++) {
98       name = va_arg(args, char *);
99       val  = va_arg(args, int);
100       code << "#define " << name << " " << val << "\n";
101     }
102   }
103 
104   // Standard libCEED definitions for CUDA backends
105   code << "#include <ceed/jit-source/cuda/cuda-jit.h>\n\n";
106 
107   // Non-macro options
108   CeedCallBackend(CeedCalloc(num_opts, &opts));
109   opts[0] = "-default-device";
110   CeedCallBackend(CeedGetData(ceed, &ceed_data));
111   CeedCallCuda(ceed, cudaGetDeviceProperties(&prop, ceed_data->device_id));
112   std::string arch_arg =
113 #if CUDA_VERSION >= 11010
114       // NVRTC used to support only virtual architectures through the option
115       // -arch, since it was only emitting PTX. It will now support actual
116       // architectures as well to emit SASS.
117       // https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#dynamic-code-generation
118       "-arch=sm_"
119 #else
120       "-arch=compute_"
121 #endif
122       + std::to_string(prop.major) + std::to_string(prop.minor);
123   opts[1] = arch_arg.c_str();
124   opts[2] = "-Dint32_t=int";
125   opts[3] = "-DCEED_RUNNING_JIT_PASS=1";
126   // Additional include dirs
127   {
128     const char **jit_source_dirs;
129 
130     CeedCallBackend(CeedGetJitSourceRoots(ceed, &num_jit_source_dirs, &jit_source_dirs));
131     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs, &opts));
132     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
133       std::ostringstream include_dir_arg;
134 
135       include_dir_arg << "-I" << jit_source_dirs[i];
136       CeedCallBackend(CeedStringAllocCopy(include_dir_arg.str().c_str(), (char **)&opts[num_opts + i]));
137     }
138     CeedCallBackend(CeedRestoreJitSourceRoots(ceed, &jit_source_dirs));
139   }
140   // User defines
141   {
142     const char **jit_defines;
143 
144     CeedCallBackend(CeedGetJitDefines(ceed, &num_jit_defines, &jit_defines));
145     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs + num_jit_defines, &opts));
146     for (CeedInt i = 0; i < num_jit_defines; i++) {
147       std::ostringstream define_arg;
148 
149       define_arg << "-D" << jit_defines[i];
150       CeedCallBackend(CeedStringAllocCopy(define_arg.str().c_str(), (char **)&opts[num_opts + num_jit_source_dirs + i]));
151     }
152     CeedCallBackend(CeedRestoreJitDefines(ceed, &jit_defines));
153   }
154 
155   // Add string source argument provided in call
156   code << source;
157 
158   // Compile kernel
159   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- ATTEMPTING TO COMPILE JIT SOURCE ----------\n");
160   CeedDebug(ceed, "Source:\n%s\n", code.str().c_str());
161   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JIT SOURCE ----------\n");
162 
163   if (!using_clang) {
164     CeedCallNvrtc(ceed, nvrtcCreateProgram(&prog, code.str().c_str(), NULL, 0, NULL, NULL));
165 
166     if (CeedDebugFlag(ceed)) {
167       // LCOV_EXCL_START
168       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- JiT COMPILER OPTIONS ----------\n");
169       for (CeedInt i = 0; i < num_opts + num_jit_source_dirs + num_jit_defines; i++) {
170         CeedDebug(ceed, "Option %d: %s", i, opts[i]);
171       }
172       CeedDebug(ceed, "");
173       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JiT COMPILER OPTIONS ----------\n");
174       // LCOV_EXCL_STOP
175     }
176 
177     nvrtcResult result = nvrtcCompileProgram(prog, num_opts + num_jit_source_dirs + num_jit_defines, opts);
178 
179     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
180       CeedCallBackend(CeedFree(&opts[num_opts + i]));
181     }
182     for (CeedInt i = 0; i < num_jit_defines; i++) {
183       CeedCallBackend(CeedFree(&opts[num_opts + num_jit_source_dirs + i]));
184     }
185     CeedCallBackend(CeedFree(&opts));
186     *is_compile_good = result == NVRTC_SUCCESS;
187     if (!*is_compile_good) {
188       char  *log;
189       size_t log_size;
190 
191       CeedCallNvrtc(ceed, nvrtcGetProgramLogSize(prog, &log_size));
192       CeedCallBackend(CeedMalloc(log_size, &log));
193       CeedCallNvrtc(ceed, nvrtcGetProgramLog(prog, log));
194       if (throw_error) {
195         return CeedError(ceed, CEED_ERROR_BACKEND, "%s\n%s", nvrtcGetErrorString(result), log);
196       } else {
197         // LCOV_EXCL_START
198         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
199         CeedDebug(ceed, "Error: %s\nCompile log:\n%s\n", nvrtcGetErrorString(result), log);
200         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
201         CeedCallBackend(CeedFree(&log));
202         CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
203         return CEED_ERROR_SUCCESS;
204         // LCOV_EXCL_STOP
205       }
206     }
207 
208 #if CUDA_VERSION >= 11010
209     CeedCallNvrtc(ceed, nvrtcGetCUBINSize(prog, &ptx_size));
210     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
211     CeedCallNvrtc(ceed, nvrtcGetCUBIN(prog, ptx));
212 #else
213     CeedCallNvrtc(ceed, nvrtcGetPTXSize(prog, &ptx_size));
214     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
215     CeedCallNvrtc(ceed, nvrtcGetPTX(prog, ptx));
216 #endif
217     CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
218 
219     CeedCallCuda(ceed, cuModuleLoadData(module, ptx));
220     CeedCallBackend(CeedFree(&ptx));
221     return CEED_ERROR_SUCCESS;
222   } else {
223     srand(time(NULL));
224     const int build_id = rand();
225 
226     // Create temp dir if needed
227     {
228       DIR *dir = opendir("temp");
229 
230       if (dir) {
231         closedir(dir);
232       } else {
233         mkdir("temp", 0777);
234         chmod("temp", 0777);
235       }
236     }
237     // Write code to temp file
238     {
239       std::string filename = std::string("temp/kernel_") + std::to_string(build_id) + std::string("_0_source.cu");
240       FILE       *file     = fopen(filename.c_str(), "w");
241 
242       CeedCheck(file, ceed, CEED_ERROR_BACKEND, "Failed to create file. Write access is required for cuda-clang");
243       fputs(code.str().c_str(), file);
244       fclose(file);
245     }
246 
247     // Get rust crate directories
248     const char **rust_source_dirs     = nullptr;
249     int          num_rust_source_dirs = 0;
250 
251     CeedCallBackend(CeedGetRustSourceRoots(ceed, &num_rust_source_dirs, &rust_source_dirs));
252 
253     std::string rust_dirs[10];
254 
255     if (num_rust_source_dirs > 0) {
256       CeedDebug(ceed, "There are %d source dirs, including %s\n", num_rust_source_dirs, rust_source_dirs[0]);
257     }
258 
259     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
260       rust_dirs[i] = std::string(rust_source_dirs[i]);
261     }
262 
263     CeedCallBackend(CeedRestoreRustSourceRoots(ceed, &rust_source_dirs));
264 
265     char *rust_toolchain = std::getenv("RUST_TOOLCHAIN");
266 
267     if (rust_toolchain == nullptr) {
268       rust_toolchain = (char *)"nightly";
269       setenv("RUST_TOOLCHAIN", "nightly", 0);
270     }
271 
272     // Compile Rust crate(s) needed
273     std::string command;
274 
275     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
276       command = "cargo +" + std::string(rust_toolchain) + " build --release --target nvptx64-nvidia-cuda --config " + rust_dirs[i] +
277                 "/.cargo/config.toml --manifest-path " + rust_dirs[i] + "/Cargo.toml";
278       CeedCallSystem(ceed, command.c_str(), "build Rust crate");
279     }
280 
281     // Compile wrapper kernel
282     command = "clang++ -flto=thin --cuda-gpu-arch=sm_" + std::to_string(prop.major) + std::to_string(prop.minor) +
283               " --cuda-device-only -emit-llvm -S temp/kernel_" + std::to_string(build_id) + "_0_source.cu -o temp/kernel_" +
284               std::to_string(build_id) + "_1_wrapped.ll ";
285     command += opts[4];
286     CeedCallSystem(ceed, command.c_str(), "JiT kernel source");
287 
288     // the find command finds the rust-installed llvm-link tool and runs it
289     command = "$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name llvm-link) temp/kernel_" +
290               std::to_string(build_id) +
291               "_1_wrapped.ll --ignore-non-bitcode --internalize --only-needed -S -o "
292               "temp/kernel_" +
293               std::to_string(build_id) + "_2_linked.ll ";
294 
295     // Searches for .a files in rust directoy
296     // Note: this is necessary because rust crate names may not match the folder they are in
297     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
298       std::string dir = rust_dirs[i] + "/target/nvptx64-nvidia-cuda/release";
299       DIR        *dp  = opendir(dir.c_str());
300 
301       CeedCheck(dp != nullptr, ceed, CEED_ERROR_BACKEND, "Could not open directory: %s", dir.c_str());
302       struct dirent *entry;
303 
304       // finds files ending in .a
305       while ((entry = readdir(dp)) != nullptr) {
306         std::string filename(entry->d_name);
307 
308         if (filename.size() >= 2 && filename.substr(filename.size() - 2) == ".a") {
309           command += dir + "/" + filename + " ";
310         }
311       }
312       closedir(dp);
313       // TODO: when libCEED switches to c++17, switch to std::filesystem for the loop above
314     }
315 
316     // Link, optimize, and compile final CUDA kernel
317     // note that the find command is used to find the rust-installed llvm tool
318     CeedCallSystem(ceed, command.c_str(), "link C and Rust source");
319     CeedCallSystem(
320         ceed,
321         ("$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name opt) --passes internalize,inline temp/kernel_" +
322          std::to_string(build_id) + "_2_linked.ll -o temp/kernel_" + std::to_string(build_id) + "_3_opt.bc")
323             .c_str(),
324         "optimize linked C and Rust source");
325     CeedCallSystem(ceed,
326                    ("$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name llc) -O3 -mcpu=sm_" +
327                     std::to_string(prop.major) + std::to_string(prop.minor) + " temp/kernel_" + std::to_string(build_id) +
328                     "_3_opt.bc -o temp/kernel_" + std::to_string(build_id) + "_4_final.ptx")
329                        .c_str(),
330                    "compile final CUDA kernel");
331 
332     ifstream      ptxfile("temp/kernel_" + std::to_string(build_id) + "_4_final.ptx");
333     ostringstream sstr;
334 
335     sstr << ptxfile.rdbuf();
336 
337     auto ptx_data = sstr.str();
338     ptx_size      = ptx_data.length();
339 
340     int result = cuModuleLoadData(module, ptx_data.c_str());
341 
342     *is_compile_good = result == 0;
343     if (!*is_compile_good) {
344       if (throw_error) {
345         return CeedError(ceed, CEED_ERROR_BACKEND, "Failed to load module data");
346       } else {
347         // LCOV_EXCL_START
348         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
349         CeedDebug(ceed, "Error: Failed to load module data");
350         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
351         return CEED_ERROR_SUCCESS;
352         // LCOV_EXCL_STOP
353       }
354     }
355   }
356   return CEED_ERROR_SUCCESS;
357 }
358 
359 int CeedCompile_Cuda(Ceed ceed, const char *source, CUmodule *module, const CeedInt num_defines, ...) {
360   bool    is_compile_good = true;
361   va_list args;
362 
363   va_start(args, num_defines);
364   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, true, &is_compile_good, module, num_defines, args);
365 
366   va_end(args);
367   CeedCallBackend(ierr);
368   return CEED_ERROR_SUCCESS;
369 }
370 
371 int CeedTryCompile_Cuda(Ceed ceed, const char *source, bool *is_compile_good, CUmodule *module, const CeedInt num_defines, ...) {
372   va_list args;
373 
374   va_start(args, num_defines);
375   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, false, is_compile_good, module, num_defines, args);
376 
377   va_end(args);
378   CeedCallBackend(ierr);
379   return CEED_ERROR_SUCCESS;
380 }
381 
382 //------------------------------------------------------------------------------
383 // Get CUDA kernel
384 //------------------------------------------------------------------------------
385 int CeedGetKernel_Cuda(Ceed ceed, CUmodule module, const char *name, CUfunction *kernel) {
386   CeedCallCuda(ceed, cuModuleGetFunction(kernel, module, name));
387   return CEED_ERROR_SUCCESS;
388 }
389 
390 //------------------------------------------------------------------------------
391 // Run CUDA kernel with block size selected automatically based on the kernel
392 //     (which may use enough registers to require a smaller block size than the
393 //      hardware is capable)
394 //------------------------------------------------------------------------------
395 int CeedRunKernelAutoblockCuda(Ceed ceed, CUfunction kernel, size_t points, void **args) {
396   int min_grid_size, max_block_size;
397 
398   CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, kernel, NULL, 0, 0x10000));
399   CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, CeedDivUpInt(points, max_block_size), max_block_size, args));
400   return CEED_ERROR_SUCCESS;
401 }
402 
403 //------------------------------------------------------------------------------
404 // Run CUDA kernel
405 //------------------------------------------------------------------------------
406 int CeedRunKernel_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size, void **args) {
407   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size, 1, 1, 0, args));
408   return CEED_ERROR_SUCCESS;
409 }
410 
411 //------------------------------------------------------------------------------
412 // Run CUDA kernel for spatial dimension
413 //------------------------------------------------------------------------------
414 int CeedRunKernelDim_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y, const int block_size_z,
415                           void **args) {
416   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size_x, block_size_y, block_size_z, 0, args));
417   return CEED_ERROR_SUCCESS;
418 }
419 
420 //------------------------------------------------------------------------------
421 // Run CUDA kernel for spatial dimension with shared memory
422 //------------------------------------------------------------------------------
423 static int CeedRunKernelDimSharedCore_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x,
424                                            const int block_size_y, const int block_size_z, const int shared_mem_size, const bool throw_error,
425                                            bool *is_good_run, void **args) {
426 #if CUDA_VERSION >= 9000
427   cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_mem_size);
428 #endif
429   CUresult result = cuLaunchKernel(kernel, grid_size, 1, 1, block_size_x, block_size_y, block_size_z, shared_mem_size, stream, args, NULL);
430 
431   if (result == CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES) {
432     int max_threads_per_block, shared_size_bytes, num_regs;
433 
434     cuFuncGetAttribute(&max_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel);
435     cuFuncGetAttribute(&shared_size_bytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel);
436     cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, kernel);
437     if (throw_error) {
438       return CeedError(ceed, CEED_ERROR_BACKEND,
439                        "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d",
440                        max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
441     } else {
442       // LCOV_EXCL_START
443       CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- LAUNCH ERROR DETECTED ----------\n");
444       CeedDebug(ceed, "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d\n",
445                 max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
446       CeedDebug256(ceed, CEED_DEBUG_COLOR_WARNING, "---------- BACKEND MAY FALLBACK ----------\n");
447       // LCOV_EXCL_STOP
448     }
449     *is_good_run = false;
450   } else CeedChk_Cu(ceed, result);
451   return CEED_ERROR_SUCCESS;
452 }
453 
454 int CeedRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
455                                 const int block_size_z, const int shared_mem_size, void **args) {
456   bool is_good_run = true;
457 
458   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, true,
459                                                   &is_good_run, args));
460   return CEED_ERROR_SUCCESS;
461 }
462 
463 int CeedTryRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
464                                    const int block_size_z, const int shared_mem_size, bool *is_good_run, void **args) {
465   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, false,
466                                                   is_good_run, args));
467   return CEED_ERROR_SUCCESS;
468 }
469 
470 //------------------------------------------------------------------------------
471