xref: /libCEED/rust/libceed-sys/c-src/backends/cuda/ceed-cuda-compile.cpp (revision 2027fb9d13fe34211738d8539f90542a9801ae2c)
1 // Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include "ceed-cuda-compile.h"
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-tools.h>
13 #include <cuda_runtime.h>
14 #include <dirent.h>
15 #include <nvrtc.h>
16 #include <stdarg.h>
17 #include <string.h>
18 #include <sys/types.h>
19 
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
25 
26 #include "ceed-cuda-common.h"
27 
// Convert an NVRTC status code into a libCEED backend error.
// NOTE: expands to a `return CeedError(...)` on failure, so it may only be used
// inside functions that return a libCEED integer error code.
#define CeedChk_Nvrtc(ceed, x)                                                                              \
  do {                                                                                                      \
    nvrtcResult result = static_cast<nvrtcResult>(x);                                                       \
    if (result != NVRTC_SUCCESS) return CeedError((ceed), CEED_ERROR_BACKEND, nvrtcGetErrorString(result)); \
  } while (0)

// Invoke an NVRTC API call and check its return status via CeedChk_Nvrtc
#define CeedCallNvrtc(ceed, ...)  \
  do {                            \
    int ierr_q_ = __VA_ARGS__;    \
    CeedChk_Nvrtc(ceed, ierr_q_); \
  } while (0)

// Run a shell command through CeedCallSystem_Core, converting failure into a backend error
#define CeedCallSystem(ceed, command, message) CeedCallBackend(CeedCallSystem_Core(ceed, command, message))
41 
42 //------------------------------------------------------------------------------
43 // Call system command and capture stdout + stderr
44 //------------------------------------------------------------------------------
45 static int CeedCallSystem_Core(Ceed ceed, const char *command, const char *message) {
46   CeedDebug(ceed, "Running command:\n$ %s\n", command);
47   FILE *output_stream = popen((command + std::string(" 2>&1")).c_str(), "r");
48 
49   CeedCheck(output_stream != nullptr, ceed, CEED_ERROR_BACKEND, "Failed to %s with command: %s", message, command);
50 
51   char output[4 * CEED_MAX_RESOURCE_LEN];
52 
53   while (fgets(output, sizeof(output), output_stream) != nullptr) {
54   }
55   CeedDebug(ceed, "Command output:\n%s\n", output);
56 
57   CeedCheck(pclose(output_stream) == 0, ceed, CEED_ERROR_BACKEND, "Failed to %s with error: %s", message, output);
58   return CEED_ERROR_SUCCESS;
59 }
60 
61 //------------------------------------------------------------------------------
62 // Compile CUDA kernel
63 //------------------------------------------------------------------------------
64 using std::ifstream;
65 using std::ofstream;
66 using std::ostringstream;
67 
68 static int CeedCompileCore_Cuda(Ceed ceed, const char *source, const bool throw_error, bool *is_compile_good, CUmodule *module,
69                                 const CeedInt num_defines, va_list args) {
70   size_t                ptx_size;
71   char                 *ptx;
72   const int             num_opts            = 4;
73   CeedInt               num_jit_source_dirs = 0, num_jit_defines = 0;
74   const char          **opts;
75   nvrtcProgram          prog;
76   struct cudaDeviceProp prop;
77   Ceed_Cuda            *ceed_data;
78 
79   cudaFree(0);  // Make sure a Context exists for nvrtc
80 
81   std::ostringstream code;
82   bool               using_clang;
83 
84   CeedCallBackend(CeedGetIsClang(ceed, &using_clang));
85 
86   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS,
87                using_clang ? "Compiling CUDA with Clang backend (with Rust QFunction support)"
88                            : "Compiling CUDA with NVRTC backend (without Rust QFunction support).\nTo use the Clang backend, set the environment "
89                              "variable GPU_CLANG=1");
90 
91   // Get kernel specific options, such as kernel constants
92   if (num_defines > 0) {
93     char *name;
94     int   val;
95 
96     for (int i = 0; i < num_defines; i++) {
97       name = va_arg(args, char *);
98       val  = va_arg(args, int);
99       code << "#define " << name << " " << val << "\n";
100     }
101   }
102 
103   // Standard libCEED definitions for CUDA backends
104   code << "#include <ceed/jit-source/cuda/cuda-jit.h>\n\n";
105 
106   // Non-macro options
107   CeedCallBackend(CeedCalloc(num_opts, &opts));
108   opts[0] = "-default-device";
109   CeedCallBackend(CeedGetData(ceed, &ceed_data));
110   CeedCallCuda(ceed, cudaGetDeviceProperties(&prop, ceed_data->device_id));
111   std::string arch_arg =
112 #if CUDA_VERSION >= 11010
113       // NVRTC used to support only virtual architectures through the option
114       // -arch, since it was only emitting PTX. It will now support actual
115       // architectures as well to emit SASS.
116       // https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#dynamic-code-generation
117       "-arch=sm_"
118 #else
119       "-arch=compute_"
120 #endif
121       + std::to_string(prop.major) + std::to_string(prop.minor);
122   opts[1] = arch_arg.c_str();
123   opts[2] = "-Dint32_t=int";
124   opts[3] = "-DCEED_RUNNING_JIT_PASS=1";
125   // Additional include dirs
126   {
127     const char **jit_source_dirs;
128 
129     CeedCallBackend(CeedGetJitSourceRoots(ceed, &num_jit_source_dirs, &jit_source_dirs));
130     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs, &opts));
131     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
132       std::ostringstream include_dir_arg;
133 
134       include_dir_arg << "-I" << jit_source_dirs[i];
135       CeedCallBackend(CeedStringAllocCopy(include_dir_arg.str().c_str(), (char **)&opts[num_opts + i]));
136     }
137     CeedCallBackend(CeedRestoreJitSourceRoots(ceed, &jit_source_dirs));
138   }
139   // User defines
140   {
141     const char **jit_defines;
142 
143     CeedCallBackend(CeedGetJitDefines(ceed, &num_jit_defines, &jit_defines));
144     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs + num_jit_defines, &opts));
145     for (CeedInt i = 0; i < num_jit_defines; i++) {
146       std::ostringstream define_arg;
147 
148       define_arg << "-D" << jit_defines[i];
149       CeedCallBackend(CeedStringAllocCopy(define_arg.str().c_str(), (char **)&opts[num_opts + num_jit_source_dirs + i]));
150     }
151     CeedCallBackend(CeedRestoreJitDefines(ceed, &jit_defines));
152   }
153 
154   // Add string source argument provided in call
155   code << source;
156 
157   // Create Program
158 
159   // Compile kernel
160   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- ATTEMPTING TO COMPILE JIT SOURCE ----------\n");
161   CeedDebug(ceed, "Source:\n%s\n", code.str().c_str());
162   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JIT SOURCE ----------\n");
163 
164   if (!using_clang) {
165     CeedCallNvrtc(ceed, nvrtcCreateProgram(&prog, code.str().c_str(), NULL, 0, NULL, NULL));
166 
167     if (CeedDebugFlag(ceed)) {
168       // LCOV_EXCL_START
169       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- JiT COMPILER OPTIONS ----------\n");
170       for (CeedInt i = 0; i < num_opts + num_jit_source_dirs + num_jit_defines; i++) {
171         CeedDebug(ceed, "Option %d: %s", i, opts[i]);
172       }
173       CeedDebug(ceed, "");
174       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JiT COMPILER OPTIONS ----------\n");
175       // LCOV_EXCL_STOP
176     }
177 
178     nvrtcResult result = nvrtcCompileProgram(prog, num_opts + num_jit_source_dirs + num_jit_defines, opts);
179 
180     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
181       CeedCallBackend(CeedFree(&opts[num_opts + i]));
182     }
183     for (CeedInt i = 0; i < num_jit_defines; i++) {
184       CeedCallBackend(CeedFree(&opts[num_opts + num_jit_source_dirs + i]));
185     }
186     CeedCallBackend(CeedFree(&opts));
187     *is_compile_good = result == NVRTC_SUCCESS;
188     if (!*is_compile_good) {
189       char  *log;
190       size_t log_size;
191 
192       CeedCallNvrtc(ceed, nvrtcGetProgramLogSize(prog, &log_size));
193       CeedCallBackend(CeedMalloc(log_size, &log));
194       CeedCallNvrtc(ceed, nvrtcGetProgramLog(prog, log));
195       if (throw_error) {
196         return CeedError(ceed, CEED_ERROR_BACKEND, "%s\n%s", nvrtcGetErrorString(result), log);
197       } else {
198         // LCOV_EXCL_START
199         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
200         CeedDebug(ceed, "Error: %s\nCompile log:\n%s\n", nvrtcGetErrorString(result), log);
201         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
202         CeedCallBackend(CeedFree(&log));
203         CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
204         return CEED_ERROR_SUCCESS;
205         // LCOV_EXCL_STOP
206       }
207     }
208 
209 #if CUDA_VERSION >= 11010
210     CeedCallNvrtc(ceed, nvrtcGetCUBINSize(prog, &ptx_size));
211     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
212     CeedCallNvrtc(ceed, nvrtcGetCUBIN(prog, ptx));
213 #else
214     CeedCallNvrtc(ceed, nvrtcGetPTXSize(prog, &ptx_size));
215     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
216     CeedCallNvrtc(ceed, nvrtcGetPTX(prog, ptx));
217 #endif
218     CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
219 
220     CeedCallCuda(ceed, cuModuleLoadData(module, ptx));
221     CeedCallBackend(CeedFree(&ptx));
222     return CEED_ERROR_SUCCESS;
223   } else {
224     const char *full_filename = "temp_kernel_source.cu";
225     FILE       *file          = fopen(full_filename, "w");
226 
227     CeedCheck(file, ceed, CEED_ERROR_BACKEND, "Failed to create file. Write access is required for cuda-clang\n");
228     fputs(code.str().c_str(), file);
229     fclose(file);
230 
231     // Get rust crate directories
232 
233     const char **rust_source_dirs     = nullptr;
234     int          num_rust_source_dirs = 0;
235 
236     CeedCallBackend(CeedGetRustSourceRoots(ceed, &num_rust_source_dirs, &rust_source_dirs));
237 
238     std::string rust_dirs[10];
239 
240     if (num_rust_source_dirs > 0) {
241       CeedDebug(ceed, "There are %d source dirs, including %s\n", num_rust_source_dirs, rust_source_dirs[0]);
242     }
243 
244     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
245       rust_dirs[i] = std::string(rust_source_dirs[i]);
246     }
247 
248     CeedCallBackend(CeedRestoreRustSourceRoots(ceed, &rust_source_dirs));
249 
250     char *rust_toolchain = std::getenv("RUST_TOOLCHAIN");
251 
252     if (rust_toolchain == nullptr) {
253       rust_toolchain = (char *)"nightly";
254       setenv("RUST_TOOLCHAIN", "nightly", 0);
255     }
256 
257     // Compile Rust crate(s) needed
258     std::string command;
259 
260     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
261       command = "cargo +" + std::string(rust_toolchain) + " build --release --target nvptx64-nvidia-cuda --config " + rust_dirs[i] +
262                 "/.cargo/config.toml --manifest-path " + rust_dirs[i] + "/Cargo.toml";
263       CeedCallSystem(ceed, command.c_str(), "build Rust crate");
264     }
265 
266     // Compile wrapper kernel
267     command = "clang++ -flto=thin --cuda-gpu-arch=sm_" + std::to_string(prop.major) + std::to_string(prop.minor) +
268               " --cuda-device-only -emit-llvm -S temp_kernel_source.cu -o temp_kernel.ll ";
269     command += opts[4];
270     CeedCallSystem(ceed, command.c_str(), "JiT kernel source");
271 
272     // the find command finds the rust-installed llvm-link tool and runs it
273     command = "$(find $(rustup run " + std::string(rust_toolchain) +
274               " rustc --print sysroot) -name llvm-link) temp_kernel.ll --ignore-non-bitcode --internalize --only-needed -S -o "
275               "temp_kernel_linked.ll  ";
276 
277     // Searches for .a files in rust directoy
278     // Note: this is necessary because rust crate names may not match the folder they are in
279     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
280       std::string dir = rust_dirs[i] + "/target/nvptx64-nvidia-cuda/release";
281       DIR        *dp  = opendir(dir.c_str());
282 
283       CeedCheck(dp != nullptr, ceed, CEED_ERROR_BACKEND, "Could not open directory: %s", dir.c_str());
284       struct dirent *entry;
285 
286       // finds files ending in .a
287       while ((entry = readdir(dp)) != nullptr) {
288         std::string filename(entry->d_name);
289 
290         if (filename.size() >= 2 && filename.substr(filename.size() - 2) == ".a") {
291           command += dir + "/" + filename + " ";
292         }
293       }
294       closedir(dp);
295       // TODO: when libCEED switches to c++17, switch to std::filesystem for the loop above
296     }
297 
298     // Link, optimize, and compile final CUDA kernel
299     // note that the find command is used to find the rust-installed llvm tool
300     CeedCallSystem(ceed, command.c_str(), "link C and Rust source");
301     CeedCallSystem(ceed,
302                    ("$(find $(rustup run " + std::string(rust_toolchain) +
303                     " rustc --print sysroot) -name opt) --passes internalize,inline temp_kernel_linked.ll -o temp_kernel_opt.bc")
304                        .c_str(),
305                    "optimize linked C and Rust source");
306     CeedCallSystem(ceed,
307                    ("$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name llc) -O3 -mcpu=sm_" +
308                     std::to_string(prop.major) + std::to_string(prop.minor) + " temp_kernel_opt.bc -o temp_kernel_final.ptx")
309                        .c_str(),
310                    "compile final CUDA kernel");
311 
312     ifstream      ptxfile("temp_kernel_final.ptx");
313     ostringstream sstr;
314 
315     sstr << ptxfile.rdbuf();
316 
317     auto ptx_data = sstr.str();
318     ptx_size      = ptx_data.length();
319 
320     int result = cuModuleLoadData(module, ptx_data.c_str());
321 
322     *is_compile_good = result == 0;
323     if (!*is_compile_good) {
324       if (throw_error) {
325         return CeedError(ceed, CEED_ERROR_BACKEND, "Failed to load module data");
326       } else {
327         // LCOV_EXCL_START
328         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
329         CeedDebug(ceed, "Error: Failed to load module data");
330         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
331         return CEED_ERROR_SUCCESS;
332         // LCOV_EXCL_STOP
333       }
334     }
335   }
336   return CEED_ERROR_SUCCESS;
337 }
338 
339 int CeedCompile_Cuda(Ceed ceed, const char *source, CUmodule *module, const CeedInt num_defines, ...) {
340   bool    is_compile_good = true;
341   va_list args;
342 
343   va_start(args, num_defines);
344   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, true, &is_compile_good, module, num_defines, args);
345 
346   va_end(args);
347   CeedCallBackend(ierr);
348   return CEED_ERROR_SUCCESS;
349 }
350 
351 int CeedTryCompile_Cuda(Ceed ceed, const char *source, bool *is_compile_good, CUmodule *module, const CeedInt num_defines, ...) {
352   va_list args;
353 
354   va_start(args, num_defines);
355   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, false, is_compile_good, module, num_defines, args);
356 
357   va_end(args);
358   CeedCallBackend(ierr);
359   return CEED_ERROR_SUCCESS;
360 }
361 
362 //------------------------------------------------------------------------------
363 // Get CUDA kernel
364 //------------------------------------------------------------------------------
365 int CeedGetKernel_Cuda(Ceed ceed, CUmodule module, const char *name, CUfunction *kernel) {
366   CeedCallCuda(ceed, cuModuleGetFunction(kernel, module, name));
367   return CEED_ERROR_SUCCESS;
368 }
369 
370 //------------------------------------------------------------------------------
371 // Run CUDA kernel with block size selected automatically based on the kernel
372 //     (which may use enough registers to require a smaller block size than the
373 //      hardware is capable)
374 //------------------------------------------------------------------------------
375 int CeedRunKernelAutoblockCuda(Ceed ceed, CUfunction kernel, size_t points, void **args) {
376   int min_grid_size, max_block_size;
377 
378   CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, kernel, NULL, 0, 0x10000));
379   CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, CeedDivUpInt(points, max_block_size), max_block_size, args));
380   return CEED_ERROR_SUCCESS;
381 }
382 
383 //------------------------------------------------------------------------------
384 // Run CUDA kernel
385 //------------------------------------------------------------------------------
386 int CeedRunKernel_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size, void **args) {
387   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size, 1, 1, 0, args));
388   return CEED_ERROR_SUCCESS;
389 }
390 
391 //------------------------------------------------------------------------------
392 // Run CUDA kernel for spatial dimension
393 //------------------------------------------------------------------------------
394 int CeedRunKernelDim_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y, const int block_size_z,
395                           void **args) {
396   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size_x, block_size_y, block_size_z, 0, args));
397   return CEED_ERROR_SUCCESS;
398 }
399 
400 //------------------------------------------------------------------------------
401 // Run CUDA kernel for spatial dimension with shared memory
402 //------------------------------------------------------------------------------
// Shared launch core: raises the kernel's dynamic shared memory limit, launches
// it, and handles CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES either as a hard error
// (throw_error == true) or by clearing *is_good_run so the caller may fall back.
// Any other launch failure is converted to a backend error via CeedChk_Cu.
static int CeedRunKernelDimSharedCore_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x,
                                           const int block_size_y, const int block_size_z, const int shared_mem_size, const bool throw_error,
                                           bool *is_good_run, void **args) {
#if CUDA_VERSION >= 9000
  // Opt in to dynamic shared memory beyond the default limit (cuFuncSetAttribute
  // requires CUDA 9+); the return value is unchecked —
  // NOTE(review): confirm ignoring a failure here is deliberate
  cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_mem_size);
#endif
  CUresult result = cuLaunchKernel(kernel, grid_size, 1, 1, block_size_x, block_size_y, block_size_z, shared_mem_size, stream, args, NULL);

  if (result == CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES) {
    // Launch exceeded the kernel's resource limits; gather per-kernel
    // attributes so the report shows why (threads, static shared mem, registers)
    int max_threads_per_block, shared_size_bytes, num_regs;

    cuFuncGetAttribute(&max_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel);
    cuFuncGetAttribute(&shared_size_bytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel);
    cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, kernel);
    if (throw_error) {
      return CeedError(ceed, CEED_ERROR_BACKEND,
                       "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d",
                       max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
    } else {
      // LCOV_EXCL_START
      CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- LAUNCH ERROR DETECTED ----------\n");
      CeedDebug(ceed, "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d\n",
                max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
      CeedDebug256(ceed, CEED_DEBUG_COLOR_WARNING, "---------- BACKEND MAY FALLBACK ----------\n");
      // LCOV_EXCL_STOP
    }
    *is_good_run = false;
  } else CeedChk_Cu(ceed, result);
  return CEED_ERROR_SUCCESS;
}
433 
434 int CeedRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
435                                 const int block_size_z, const int shared_mem_size, void **args) {
436   bool is_good_run = true;
437 
438   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, true,
439                                                   &is_good_run, args));
440   return CEED_ERROR_SUCCESS;
441 }
442 
443 int CeedTryRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
444                                    const int block_size_z, const int shared_mem_size, bool *is_good_run, void **args) {
445   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, false,
446                                                   is_good_run, args));
447   return CEED_ERROR_SUCCESS;
448 }
449 
450 //------------------------------------------------------------------------------
451