xref: /libCEED/backends/cuda/ceed-cuda-compile.cpp (revision a550dd668ad5fdaeaade4afb48509e4b41c51584)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include "ceed-cuda-compile.h"
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-tools.h>
13 #include <cuda.h>
14 #include <cuda_runtime.h>
15 #include <nvrtc.h>
16 #include <stdarg.h>
17 #include <string.h>
18 
19 #include <sstream>
20 
21 #include "ceed-cuda-common.h"
22 
// Check an NVRTC result code: on failure, return a CEED backend error carrying
// NVRTC's human-readable error string.
#define CeedChk_Nvrtc(ceed, x)                                                                              \
  do {                                                                                                      \
    nvrtcResult result = static_cast<nvrtcResult>(x);                                                       \
    if (result != NVRTC_SUCCESS) return CeedError((ceed), CEED_ERROR_BACKEND, nvrtcGetErrorString(result)); \
  } while (0)

// Call an NVRTC function and check its result.
// No trailing semicolon after `while (0)`: the caller supplies the `;`, so the
// macro expands to exactly one statement and composes safely with if/else.
// (The previous trailing `;` made `if (c) CeedCallNvrtc(...); else ...` a
// syntax error because of the extra empty statement.)
#define CeedCallNvrtc(ceed, ...)  \
  do {                            \
    int ierr_q_ = __VA_ARGS__;    \
    CeedChk_Nvrtc(ceed, ierr_q_); \
  } while (0)
34 
//------------------------------------------------------------------------------
// Compile CUDA kernel
//
// Assembles a complete source string — caller-supplied integer #defines, the
// standard libCEED JiT definitions, then `source` — compiles it with NVRTC for
// the compute capability of the device bound to `ceed`, and loads the
// resulting PTX into `module`.
//
// The variadic arguments are `num_defines` (name, value) pairs of
// (char *, int); each pair is emitted as `#define name value`.
//------------------------------------------------------------------------------
int CeedCompileCuda(Ceed ceed, const char *source, CUmodule *module, const CeedInt num_defines, ...) {
  cudaFree(0);  // Make sure a Context exists for nvrtc
  nvrtcProgram prog;

  std::ostringstream code;

  // Get kernel specific options, such as kernel constants
  if (num_defines > 0) {
    va_list args;
    va_start(args, num_defines);
    char *name;
    int   val;
    for (int i = 0; i < num_defines; i++) {
      name = va_arg(args, char *);
      val  = va_arg(args, int);
      code << "#define " << name << " " << val << "\n";
    }
    va_end(args);
  }

  // Standard libCEED definitions for CUDA backends
  char *jit_defs_path, *jit_defs_source;
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-jit.h", &jit_defs_path));
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, jit_defs_path, &jit_defs_source));
  code << jit_defs_source;
  code << "\n\n";
  CeedCallBackend(CeedFree(&jit_defs_path));
  CeedCallBackend(CeedFree(&jit_defs_source));

  // Non-macro options
  const int   num_opts = 3;
  const char *opts[num_opts];
  opts[0] = "-default-device";
  struct cudaDeviceProp prop;
  Ceed_Cuda            *ceed_data;
  CeedCallBackend(CeedGetData(ceed, &ceed_data));
  CeedCallCuda(ceed, cudaGetDeviceProperties(&prop, ceed_data->device_id));
  // Target the compute capability of the device this Ceed context is bound to;
  // arch_arg must outlive the nvrtcCompileProgram call since opts[1] aliases it
  std::string arch_arg = "-arch=compute_" + std::to_string(prop.major) + std::to_string(prop.minor);
  opts[1]              = arch_arg.c_str();
  opts[2]              = "-Dint32_t=int";

  // Add string source argument provided in call
  code << source;

  // Create Program
  CeedCallNvrtc(ceed, nvrtcCreateProgram(&prog, code.str().c_str(), NULL, 0, NULL, NULL));

  // Compile kernel
  nvrtcResult result = nvrtcCompileProgram(prog, num_opts, opts);
  if (result != NVRTC_SUCCESS) {
    // Retrieve the compile log so the error message is actionable
    size_t log_size;
    CeedCallNvrtc(ceed, nvrtcGetProgramLogSize(prog, &log_size));
    char *log;
    CeedCallBackend(CeedMalloc(log_size, &log));
    CeedCallNvrtc(ceed, nvrtcGetProgramLog(prog, log));
    // Format the error message while `log` is still valid, then release the
    // log buffer and the NVRTC program (both were previously leaked on this
    // path) before returning the error code
    int ierr = CeedError(ceed, CEED_ERROR_BACKEND, "%s\n%s", nvrtcGetErrorString(result), log);
    CeedCallBackend(CeedFree(&log));
    nvrtcDestroyProgram(&prog);  // best-effort cleanup; already returning an error
    return ierr;
  }

  size_t ptx_size;
  CeedCallNvrtc(ceed, nvrtcGetPTXSize(prog, &ptx_size));
  char *ptx;
  CeedCallBackend(CeedMalloc(ptx_size, &ptx));
  CeedCallNvrtc(ceed, nvrtcGetPTX(prog, ptx));
  CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));

  // Load the compiled PTX into a module on the current context
  CeedCallCuda(ceed, cuModuleLoadData(module, ptx));
  CeedCallBackend(CeedFree(&ptx));
  return CEED_ERROR_SUCCESS;
}
107 
//------------------------------------------------------------------------------
// Get CUDA kernel
//------------------------------------------------------------------------------
// Look up the function named `name` in a previously loaded module and store
// its handle in `kernel`.
int CeedGetKernelCuda(Ceed ceed, CUmodule module, const char *name, CUfunction *kernel) {
  const CUresult result = cuModuleGetFunction(kernel, module, name);
  CeedChk_Cu(ceed, result);
  return CEED_ERROR_SUCCESS;
}
115 
// Run kernel with block size selected automatically based on the kernel (which may use enough registers to require a smaller block size than the
// hardware is capable).
//
// `points` is the total number of threads needed; the grid size is the ceiling
// division of `points` by the chosen block size.
int CeedRunKernelAutoblockCuda(Ceed ceed, CUfunction kernel, size_t points, void **args) {
  int min_grid_size, max_block_size;
  // Let the driver pick the occupancy-maximizing block size; no dynamic shared
  // memory (NULL, 0) and a block size limit of 0x10000 (i.e. effectively none)
  CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, kernel, NULL, 0, 0x10000));
  CeedCallBackend(CeedRunKernelCuda(ceed, kernel, CeedDivUpInt(points, max_block_size), max_block_size, args));
  // Was `return 0;` — use the named success code like the rest of the file
  return CEED_ERROR_SUCCESS;
}
124 
//------------------------------------------------------------------------------
// Run CUDA kernel
//------------------------------------------------------------------------------
// Launch `kernel` on a 1-D grid: `grid_size` blocks of `block_size` threads,
// no dynamic shared memory.
int CeedRunKernelCuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size, void **args) {
  // A 1-D launch is the degenerate case of the shared-memory variant:
  // block dims (block_size, 1, 1) and zero bytes of dynamic shared memory
  const int ierr = CeedRunKernelDimSharedCuda(ceed, kernel, grid_size, block_size, 1, 1, 0, args);
  if (ierr != CEED_ERROR_SUCCESS) return ierr;
  return CEED_ERROR_SUCCESS;
}
132 
//------------------------------------------------------------------------------
// Run CUDA kernel for spatial dimension
//------------------------------------------------------------------------------
// Launch `kernel` on a 1-D grid with a 3-D thread block and no dynamic shared
// memory.
int CeedRunKernelDimCuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y, const int block_size_z,
                         void **args) {
  // Forward to the shared-memory variant with zero dynamic shared memory
  const int ierr = CeedRunKernelDimSharedCuda(ceed, kernel, grid_size, block_size_x, block_size_y, block_size_z, 0, args);
  if (ierr != CEED_ERROR_SUCCESS) return ierr;
  return CEED_ERROR_SUCCESS;
}
141 
//------------------------------------------------------------------------------
// Run CUDA kernel for spatial dimension with shared memory
//------------------------------------------------------------------------------
// Launch `kernel` with grid (grid_size, 1, 1), block (x, y, z), and
// `shared_mem_size` bytes of dynamic shared memory on the NULL stream.
// On CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES the error message reports the
// kernel's resource usage so the failure is diagnosable.
int CeedRunKernelDimSharedCuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y,
                               const int block_size_z, const int shared_mem_size, void **args) {
#if CUDA_VERSION >= 9000
  // Opt in to dynamic shared memory allocations above the default limit.
  // Check the result — previously this return value was silently ignored, so a
  // failed opt-in only surfaced later as a confusing launch error
  CeedCallCuda(ceed, cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_mem_size));
#endif
  CUresult result = cuLaunchKernel(kernel, grid_size, 1, 1, block_size_x, block_size_y, block_size_z, shared_mem_size, NULL, args, NULL);
  if (result == CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES) {
    int max_threads_per_block, shared_size_bytes, num_regs;
    // Best-effort diagnostic queries on an error path; their return codes are
    // deliberately not checked since we are already constructing an error
    cuFuncGetAttribute(&max_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel);
    cuFuncGetAttribute(&shared_size_bytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel);
    cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, kernel);
    return CeedError(ceed, CEED_ERROR_BACKEND,
                     "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d",
                     max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
  } else CeedChk_Cu(ceed, result);
  return CEED_ERROR_SUCCESS;
}
162 
163 //------------------------------------------------------------------------------
164