xref: /libCEED/backends/cuda/ceed-cuda-compile.cpp (revision 33cc410d9e7c7c1ec2b31835f296422ec750fa8c)
1 // Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include "ceed-cuda-compile.h"
9 
10 #include <ceed.h>
11 #include <ceed/backend.h>
12 #include <ceed/jit-tools.h>
13 #include <cuda_runtime.h>
14 #include <dirent.h>
15 #include <nvrtc.h>
16 #include <stdarg.h>
17 #include <string.h>
18 #include <sys/stat.h>
19 #include <sys/types.h>
20 
21 #include <cstdlib>
22 #include <fstream>
23 #include <iostream>
24 #include <sstream>
25 #include <string>
26 
27 #include "ceed-cuda-common.h"
28 
// Check an NVRTC status code; on failure, return a CeedError carrying the NVRTC error string
#define CeedChk_Nvrtc(ceed, x)                                                                              \
  do {                                                                                                      \
    nvrtcResult result = static_cast<nvrtcResult>(x);                                                       \
    if (result != NVRTC_SUCCESS) return CeedError((ceed), CEED_ERROR_BACKEND, nvrtcGetErrorString(result)); \
  } while (0)

// Evaluate an NVRTC call exactly once and check its status with CeedChk_Nvrtc
#define CeedCallNvrtc(ceed, ...)  \
  do {                            \
    int ierr_q_ = __VA_ARGS__;    \
    CeedChk_Nvrtc(ceed, ierr_q_); \
  } while (0)

// Run a shell command via CeedCallSystem_Core, propagating any backend error;
// `message` describes the action for error reporting
#define CeedCallSystem(ceed, command, message) CeedCallBackend(CeedCallSystem_Core(ceed, command, message))
42 
43 //------------------------------------------------------------------------------
44 // Call system command and capture stdout + stderr
45 //------------------------------------------------------------------------------
46 static int CeedCallSystem_Core(Ceed ceed, const char *command, const char *message) {
47   CeedDebug(ceed, "Running command:\n$ %s\n", command);
48   FILE *output_stream = popen((command + std::string(" 2>&1")).c_str(), "r");
49 
50   CeedCheck(output_stream != nullptr, ceed, CEED_ERROR_BACKEND, "Failed to %s with command: %s", message, command);
51 
52   char output[4 * CEED_MAX_RESOURCE_LEN];
53 
54   while (fgets(output, sizeof(output), output_stream) != nullptr) {
55   }
56   CeedDebug(ceed, "Command output:\n%s\n", output);
57 
58   CeedCheck(pclose(output_stream) == 0, ceed, CEED_ERROR_BACKEND, "Failed to %s with command: %s\nand error: %s", message, command, output);
59   return CEED_ERROR_SUCCESS;
60 }
61 
62 //------------------------------------------------------------------------------
63 // Compile CUDA kernel
64 //------------------------------------------------------------------------------
65 using std::ifstream;
66 using std::ofstream;
67 using std::ostringstream;
68 
69 static int CeedCompileCore_Cuda(Ceed ceed, const char *source, const bool throw_error, bool *is_compile_good, CUmodule *module,
70                                 const CeedInt num_defines, va_list args) {
71   size_t                ptx_size;
72   char                 *ptx;
73   const int             num_opts            = 4;
74   CeedInt               num_jit_source_dirs = 0, num_jit_defines = 0;
75   const char          **opts;
76   nvrtcProgram          prog;
77   struct cudaDeviceProp prop;
78   Ceed_Cuda            *ceed_data;
79 
80   cudaFree(0);  // Make sure a Context exists for nvrtc
81 
82   std::ostringstream code;
83   bool               using_clang;
84 
85   CeedCallBackend(CeedGetIsClang(ceed, &using_clang));
86 
87   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS,
88                using_clang ? "Compiling CUDA with Clang backend (with Rust QFunction support)"
89                            : "Compiling CUDA with NVRTC backend (without Rust QFunction support).\nTo use the Clang backend, set the environment "
90                              "variable GPU_CLANG=1");
91 
92   // Get kernel specific options, such as kernel constants
93   if (num_defines > 0) {
94     char *name;
95     int   val;
96 
97     for (int i = 0; i < num_defines; i++) {
98       name = va_arg(args, char *);
99       val  = va_arg(args, int);
100       code << "#define " << name << " " << val << "\n";
101     }
102   }
103 
104   // Standard libCEED definitions for CUDA backends
105   code << "#include <ceed/jit-source/cuda/cuda-jit.h>\n\n";
106 
107   // Non-macro options
108   CeedCallBackend(CeedCalloc(num_opts, &opts));
109   opts[0] = "-default-device";
110   CeedCallBackend(CeedGetData(ceed, &ceed_data));
111   CeedCallCuda(ceed, cudaGetDeviceProperties(&prop, ceed_data->device_id));
112   std::string arch_arg =
113 #if CUDA_VERSION >= 11010
114       // NVRTC used to support only virtual architectures through the option
115       // -arch, since it was only emitting PTX. It will now support actual
116       // architectures as well to emit SASS.
117       // https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#dynamic-code-generation
118       "-arch=sm_"
119 #else
120       "-arch=compute_"
121 #endif
122       + std::to_string(prop.major) + std::to_string(prop.minor);
123   opts[1] = arch_arg.c_str();
124   opts[2] = "-Dint32_t=int";
125   opts[3] = "-DCEED_RUNNING_JIT_PASS=1";
126   // Additional include dirs
127   {
128     const char **jit_source_dirs;
129 
130     CeedCallBackend(CeedGetJitSourceRoots(ceed, &num_jit_source_dirs, &jit_source_dirs));
131     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs, &opts));
132     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
133       std::ostringstream include_dir_arg;
134 
135       include_dir_arg << "-I" << jit_source_dirs[i];
136       CeedCallBackend(CeedStringAllocCopy(include_dir_arg.str().c_str(), (char **)&opts[num_opts + i]));
137     }
138     CeedCallBackend(CeedRestoreJitSourceRoots(ceed, &jit_source_dirs));
139   }
140   // User defines
141   {
142     const char **jit_defines;
143 
144     CeedCallBackend(CeedGetJitDefines(ceed, &num_jit_defines, &jit_defines));
145     CeedCallBackend(CeedRealloc(num_opts + num_jit_source_dirs + num_jit_defines, &opts));
146     for (CeedInt i = 0; i < num_jit_defines; i++) {
147       std::ostringstream define_arg;
148 
149       define_arg << "-D" << jit_defines[i];
150       CeedCallBackend(CeedStringAllocCopy(define_arg.str().c_str(), (char **)&opts[num_opts + num_jit_source_dirs + i]));
151     }
152     CeedCallBackend(CeedRestoreJitDefines(ceed, &jit_defines));
153   }
154 
155   // Add string source argument provided in call
156   code << source;
157 
158   // Compile kernel
159   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- ATTEMPTING TO COMPILE JIT SOURCE ----------\n");
160   CeedDebug(ceed, "Source:\n%s\n", code.str().c_str());
161   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JIT SOURCE ----------\n");
162 
163   if (!using_clang) {
164     CeedCallNvrtc(ceed, nvrtcCreateProgram(&prog, code.str().c_str(), NULL, 0, NULL, NULL));
165 
166     if (CeedDebugFlag(ceed)) {
167       // LCOV_EXCL_START
168       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- JiT COMPILER OPTIONS ----------\n");
169       for (CeedInt i = 0; i < num_opts + num_jit_source_dirs + num_jit_defines; i++) {
170         CeedDebug(ceed, "Option %d: %s", i, opts[i]);
171       }
172       CeedDebug(ceed, "");
173       CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "---------- END OF JiT COMPILER OPTIONS ----------\n");
174       // LCOV_EXCL_STOP
175     }
176 
177     nvrtcResult result = nvrtcCompileProgram(prog, num_opts + num_jit_source_dirs + num_jit_defines, opts);
178 
179     for (CeedInt i = 0; i < num_jit_source_dirs; i++) {
180       CeedCallBackend(CeedFree(&opts[num_opts + i]));
181     }
182     for (CeedInt i = 0; i < num_jit_defines; i++) {
183       CeedCallBackend(CeedFree(&opts[num_opts + num_jit_source_dirs + i]));
184     }
185     CeedCallBackend(CeedFree(&opts));
186     *is_compile_good = result == NVRTC_SUCCESS;
187     if (!*is_compile_good) {
188       char  *log;
189       size_t log_size;
190 
191       CeedCallNvrtc(ceed, nvrtcGetProgramLogSize(prog, &log_size));
192       CeedCallBackend(CeedMalloc(log_size, &log));
193       CeedCallNvrtc(ceed, nvrtcGetProgramLog(prog, log));
194       if (throw_error) {
195         return CeedError(ceed, CEED_ERROR_BACKEND, "%s\n%s", nvrtcGetErrorString(result), log);
196       } else {
197         // LCOV_EXCL_START
198         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
199         CeedDebug(ceed, "Error: %s\nCompile log:\n%s\n", nvrtcGetErrorString(result), log);
200         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
201         CeedCallBackend(CeedFree(&log));
202         CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
203         return CEED_ERROR_SUCCESS;
204         // LCOV_EXCL_STOP
205       }
206     }
207 
208 #if CUDA_VERSION >= 11010
209     CeedCallNvrtc(ceed, nvrtcGetCUBINSize(prog, &ptx_size));
210     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
211     CeedCallNvrtc(ceed, nvrtcGetCUBIN(prog, ptx));
212 #else
213     CeedCallNvrtc(ceed, nvrtcGetPTXSize(prog, &ptx_size));
214     CeedCallBackend(CeedMalloc(ptx_size, &ptx));
215     CeedCallNvrtc(ceed, nvrtcGetPTX(prog, ptx));
216 #endif
217     CeedCallNvrtc(ceed, nvrtcDestroyProgram(&prog));
218 
219     CeedCallCuda(ceed, cuModuleLoadData(module, ptx));
220     CeedCallBackend(CeedFree(&ptx));
221     return CEED_ERROR_SUCCESS;
222   } else {
223     srand(time(NULL));
224     const int build_id = rand();
225 
226     // Create temp dir if needed
227     {
228       DIR *dir = opendir("temp");
229 
230       if (dir) {
231         closedir(dir);
232       } else {
233         // In parallel multiple processes may attempt
234         // Only one process needs to succeed
235         mkdir("temp", 0777);
236         chmod("temp", 0777);
237       }
238     }
239     // Write code to temp file
240     {
241       std::string filename = std::string("temp/kernel_") + std::to_string(build_id) + std::string("_0_source.cu");
242       FILE       *file     = fopen(filename.c_str(), "w");
243 
244       CeedCheck(file, ceed, CEED_ERROR_BACKEND, "Failed to create file. Write access is required for cuda-clang");
245       fputs(code.str().c_str(), file);
246       fclose(file);
247     }
248 
249     // Get rust crate directories
250     const char **rust_source_dirs     = nullptr;
251     int          num_rust_source_dirs = 0;
252 
253     CeedCallBackend(CeedGetRustSourceRoots(ceed, &num_rust_source_dirs, &rust_source_dirs));
254 
255     std::string rust_dirs[10];
256 
257     if (num_rust_source_dirs > 0) {
258       CeedDebug(ceed, "There are %d source dirs, including %s\n", num_rust_source_dirs, rust_source_dirs[0]);
259     }
260 
261     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
262       rust_dirs[i] = std::string(rust_source_dirs[i]);
263     }
264 
265     CeedCallBackend(CeedRestoreRustSourceRoots(ceed, &rust_source_dirs));
266 
267     char *rust_toolchain = std::getenv("RUST_TOOLCHAIN");
268 
269     if (rust_toolchain == nullptr) {
270       rust_toolchain = (char *)"nightly";
271       setenv("RUST_TOOLCHAIN", "nightly", 0);
272     }
273 
274     // Compile Rust crate(s) needed
275     std::string command;
276 
277     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
278       command = "cargo +" + std::string(rust_toolchain) + " build --release --target nvptx64-nvidia-cuda --config " + rust_dirs[i] +
279                 "/.cargo/config.toml --manifest-path " + rust_dirs[i] + "/Cargo.toml";
280       CeedCallSystem(ceed, command.c_str(), "build Rust crate");
281     }
282 
283     // Compile wrapper kernel
284     command = "clang++ -flto=thin --cuda-gpu-arch=sm_" + std::to_string(prop.major) + std::to_string(prop.minor) +
285               " --cuda-device-only -emit-llvm -S temp/kernel_" + std::to_string(build_id) + "_0_source.cu -o temp/kernel_" +
286               std::to_string(build_id) + "_1_wrapped.ll ";
287     command += opts[4];
288     CeedCallSystem(ceed, command.c_str(), "JiT kernel source");
289     CeedCallSystem(ceed, ("chmod 0777 temp/kernel_" + std::to_string(build_id) + "_1_wrapped.ll").c_str(), "update JiT file permissions");
290 
291     // the find command finds the rust-installed llvm-link tool and runs it
292     command = "$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name llvm-link) temp/kernel_" +
293               std::to_string(build_id) +
294               "_1_wrapped.ll --ignore-non-bitcode --internalize --only-needed -S -o "
295               "temp/kernel_" +
296               std::to_string(build_id) + "_2_linked.ll ";
297 
298     // Searches for .a files in rust directoy
299     // Note: this is necessary because rust crate names may not match the folder they are in
300     for (CeedInt i = 0; i < num_rust_source_dirs; i++) {
301       std::string dir = rust_dirs[i] + "/target/nvptx64-nvidia-cuda/release";
302       DIR        *dp  = opendir(dir.c_str());
303 
304       CeedCheck(dp != nullptr, ceed, CEED_ERROR_BACKEND, "Could not open directory: %s", dir.c_str());
305       struct dirent *entry;
306 
307       // finds files ending in .a
308       while ((entry = readdir(dp)) != nullptr) {
309         std::string filename(entry->d_name);
310 
311         if (filename.size() >= 2 && filename.substr(filename.size() - 2) == ".a") {
312           command += dir + "/" + filename + " ";
313         }
314       }
315       closedir(dp);
316       // TODO: when libCEED switches to c++17, switch to std::filesystem for the loop above
317     }
318 
319     // Link, optimize, and compile final CUDA kernel
320     // note that the find command is used to find the rust-installed llvm tool
321     CeedCallSystem(ceed, command.c_str(), "link C and Rust source");
322     CeedCallSystem(
323         ceed,
324         ("$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name opt) --passes internalize,inline temp/kernel_" +
325          std::to_string(build_id) + "_2_linked.ll -o temp/kernel_" + std::to_string(build_id) + "_3_opt.bc")
326             .c_str(),
327         "optimize linked C and Rust source");
328     CeedCallSystem(ceed, ("chmod 0777 temp/kernel_" + std::to_string(build_id) + "_2_linked.ll").c_str(), "update JiT file permissions");
329     CeedCallSystem(ceed,
330                    ("$(find $(rustup run " + std::string(rust_toolchain) + " rustc --print sysroot) -name llc) -O3 -mcpu=sm_" +
331                     std::to_string(prop.major) + std::to_string(prop.minor) + " temp/kernel_" + std::to_string(build_id) +
332                     "_3_opt.bc -o temp/kernel_" + std::to_string(build_id) + "_4_final.ptx")
333                        .c_str(),
334                    "compile final CUDA kernel");
335     CeedCallSystem(ceed, ("chmod 0777 temp/kernel_" + std::to_string(build_id) + "_4_final.ptx").c_str(), "update JiT file permissions");
336 
337     ifstream      ptxfile("temp/kernel_" + std::to_string(build_id) + "_4_final.ptx");
338     ostringstream sstr;
339 
340     sstr << ptxfile.rdbuf();
341 
342     auto ptx_data = sstr.str();
343     ptx_size      = ptx_data.length();
344 
345     int result = cuModuleLoadData(module, ptx_data.c_str());
346 
347     *is_compile_good = result == 0;
348     if (!*is_compile_good) {
349       if (throw_error) {
350         return CeedError(ceed, CEED_ERROR_BACKEND, "Failed to load module data");
351       } else {
352         // LCOV_EXCL_START
353         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- COMPILE ERROR DETECTED ----------\n");
354         CeedDebug(ceed, "Error: Failed to load module data");
355         CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- BACKEND MAY FALLBACK ----------\n");
356         return CEED_ERROR_SUCCESS;
357         // LCOV_EXCL_STOP
358       }
359     }
360   }
361   return CEED_ERROR_SUCCESS;
362 }
363 
364 int CeedCompile_Cuda(Ceed ceed, const char *source, CUmodule *module, const CeedInt num_defines, ...) {
365   bool    is_compile_good = true;
366   va_list args;
367 
368   va_start(args, num_defines);
369   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, true, &is_compile_good, module, num_defines, args);
370 
371   va_end(args);
372   CeedCallBackend(ierr);
373   return CEED_ERROR_SUCCESS;
374 }
375 
376 int CeedTryCompile_Cuda(Ceed ceed, const char *source, bool *is_compile_good, CUmodule *module, const CeedInt num_defines, ...) {
377   va_list args;
378 
379   va_start(args, num_defines);
380   const CeedInt ierr = CeedCompileCore_Cuda(ceed, source, false, is_compile_good, module, num_defines, args);
381 
382   va_end(args);
383   CeedCallBackend(ierr);
384   return CEED_ERROR_SUCCESS;
385 }
386 
387 //------------------------------------------------------------------------------
388 // Get CUDA kernel
389 //------------------------------------------------------------------------------
390 int CeedGetKernel_Cuda(Ceed ceed, CUmodule module, const char *name, CUfunction *kernel) {
391   CeedCallCuda(ceed, cuModuleGetFunction(kernel, module, name));
392   return CEED_ERROR_SUCCESS;
393 }
394 
395 //------------------------------------------------------------------------------
396 // Run CUDA kernel with block size selected automatically based on the kernel
397 //     (which may use enough registers to require a smaller block size than the
398 //      hardware is capable)
399 //------------------------------------------------------------------------------
400 int CeedRunKernelAutoblockCuda(Ceed ceed, CUfunction kernel, size_t points, void **args) {
401   int min_grid_size, max_block_size;
402 
403   CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_block_size, kernel, NULL, 0, 0x10000));
404   CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, CeedDivUpInt(points, max_block_size), max_block_size, args));
405   return CEED_ERROR_SUCCESS;
406 }
407 
408 //------------------------------------------------------------------------------
409 // Run CUDA kernel
410 //------------------------------------------------------------------------------
411 int CeedRunKernel_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size, void **args) {
412   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size, 1, 1, 0, args));
413   return CEED_ERROR_SUCCESS;
414 }
415 
416 //------------------------------------------------------------------------------
417 // Run CUDA kernel for spatial dimension
418 //------------------------------------------------------------------------------
419 int CeedRunKernelDim_Cuda(Ceed ceed, CUfunction kernel, const int grid_size, const int block_size_x, const int block_size_y, const int block_size_z,
420                           void **args) {
421   CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, kernel, NULL, grid_size, block_size_x, block_size_y, block_size_z, 0, args));
422   return CEED_ERROR_SUCCESS;
423 }
424 
425 //------------------------------------------------------------------------------
426 // Run CUDA kernel for spatial dimension with shared memory
427 //------------------------------------------------------------------------------
// Launch a kernel on a (grid_size, 1, 1) x (block_size_x, block_size_y, block_size_z)
// configuration with shared_mem_size bytes of dynamic shared memory on `stream`.
//
// throw_error - when true, CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES is a fatal CeedError;
//               when false, it is logged and reported via is_good_run instead
// is_good_run - set to false on an out-of-resources launch (left untouched otherwise)
//
// Any launch error other than CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES is always fatal.
static int CeedRunKernelDimSharedCore_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x,
                                           const int block_size_y, const int block_size_z, const int shared_mem_size, const bool throw_error,
                                           bool *is_good_run, void **args) {
#if CUDA_VERSION >= 9000
  // Opt in to larger dynamic shared memory allocations before launching.
  // NOTE(review): the return value is deliberately unchecked here - presumably a
  // failure surfaces as a launch error below; confirm this is intentional
  cuFuncSetAttribute(kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_mem_size);
#endif
  CUresult result = cuLaunchKernel(kernel, grid_size, 1, 1, block_size_x, block_size_y, block_size_z, shared_mem_size, stream, args, NULL);

  if (result == CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES) {
    int max_threads_per_block, shared_size_bytes, num_regs;

    // Gather kernel resource usage for a diagnostic message
    cuFuncGetAttribute(&max_threads_per_block, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, kernel);
    cuFuncGetAttribute(&shared_size_bytes, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, kernel);
    cuFuncGetAttribute(&num_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, kernel);
    if (throw_error) {
      return CeedError(ceed, CEED_ERROR_BACKEND,
                       "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d",
                       max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
    } else {
      // LCOV_EXCL_START
      CeedDebug256(ceed, CEED_DEBUG_COLOR_ERROR, "---------- LAUNCH ERROR DETECTED ----------\n");
      CeedDebug(ceed, "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: max_threads_per_block %d on block size (%d,%d,%d), shared_size %d, num_regs %d\n",
                max_threads_per_block, block_size_x, block_size_y, block_size_z, shared_size_bytes, num_regs);
      CeedDebug256(ceed, CEED_DEBUG_COLOR_WARNING, "---------- BACKEND MAY FALLBACK ----------\n");
      // LCOV_EXCL_STOP
    }
    *is_good_run = false;
  } else CeedChk_Cu(ceed, result);
  return CEED_ERROR_SUCCESS;
}
458 
459 int CeedRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
460                                 const int block_size_z, const int shared_mem_size, void **args) {
461   bool is_good_run = true;
462 
463   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, true,
464                                                   &is_good_run, args));
465   return CEED_ERROR_SUCCESS;
466 }
467 
468 int CeedTryRunKernelDimShared_Cuda(Ceed ceed, CUfunction kernel, CUstream stream, const int grid_size, const int block_size_x, const int block_size_y,
469                                    const int block_size_z, const int shared_mem_size, bool *is_good_run, void **args) {
470   CeedCallBackend(CeedRunKernelDimSharedCore_Cuda(ceed, kernel, stream, grid_size, block_size_x, block_size_y, block_size_z, shared_mem_size, false,
471                                                   is_good_run, args));
472   return CEED_ERROR_SUCCESS;
473 }
474 
475 //------------------------------------------------------------------------------
476