#include <petsc/private/cpp/memory.hpp> // make_unique

#include "cupmdevice.hpp"

#include <algorithm>
#include <csetjmp> // for cuda mpi awareness
#include <csignal> // SIGSEGV
#include <iterator>
#include <type_traits>

namespace Petsc
{

namespace device
{

namespace cupm
{

// internal "impls" class for CUPMDevice. Each instance represents a single cupm device
template <DeviceType T>
class Device<T>::DeviceInternal {
  const int        id_;
  bool             devInitialized_ = false;
  cupmDeviceProp_t dprop_{}; // cudaDeviceProp appears to be an actual struct, i.e. you can't
                             // initialize it with nullptr or NULL (i've tried)

  static PetscErrorCode CUPMAwareMPI_(bool *) noexcept;

public:
  // constructor
  explicit constexpr DeviceInternal(int dev) noexcept : id_(dev) { }

  // gather all relevant information for a particular device, a cupmDeviceProp_t is
  // usually sufficient here
  PetscErrorCode initialize() noexcept;
  PetscErrorCode configure() noexcept;
  PetscErrorCode view(PetscViewer) const noexcept;
  PetscErrorCode getattribute(PetscDeviceAttribute, void *) const noexcept;
  PetscErrorCode shutdown() noexcept;

  PETSC_NODISCARD auto id() const -> decltype(id_) { return id_; }
  PETSC_NODISCARD auto initialized() const -> decltype(devInitialized_) { return devInitialized_; }
  PETSC_NODISCARD auto prop() const -> const decltype(dprop_) & { return dprop_; }
};

// the goal here is simply to get the cupm backend to create its context, not to do any type of
// modification of it, or create objects (since these may be affected by subsequent
// configuration changes)
template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::initialize() noexcept
{
  PetscFunctionBegin;
  if (initialized()) PetscFunctionReturn(PETSC_SUCCESS);
  devInitialized_ = true;
  // need to do this BEFORE device has been set, although if the user
  // has already done this then we just ignore it
  if (cupmSetDeviceFlags(cupmDeviceMapHost) == cupmErrorSetOnActiveProcess) {
    // reset the error if it was cupmErrorSetOnActiveProcess
    const auto PETSC_UNUSED unused = cupmGetLastError();
  } else PetscCallCUPM(cupmGetLastError());
  // cuda 5.0+ will create a context when cupmSetDevice is called
  if (cupmSetDevice(id()) != cupmErrorDeviceAlreadyInUse) PetscCallCUPM(cupmGetLastError());
  // and in case it doesn't, explicitly call init here
  PetscCallCUPM(cupmInit(0));
#if PetscDefined(HAVE_CUDA)
  // nvmlInit() deprecated in NVML 5.319
  PetscCallNVML(nvmlInit_v2());
#endif
  // where is this variable defined and when is it set? who knows! but it is defined and set
  // at this point. either way, each device must make this check since I guess MPI might not be
  // aware of all of them?
  if (use_gpu_aware_mpi) {
    bool aware;

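    // CUPMAwareMPI_() (defined below) probes GPU-awareness at runtime: it allocates a small
    // device buffer, installs a temporary SIGSEGV handler, and attempts an MPI_Allreduce
    // directly on the device pointer. A non-GPU-aware MPI will typically fault when it
    // dereferences that pointer, in which case the handler longjmps back and the result
    // stays false.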
    PetscCall(CUPMAwareMPI_(&aware));
    // For Open MPI, we could do a compile time check with
    // "defined(PETSC_HAVE_OPENMPI) && defined(MPIX_CUDA_AWARE_SUPPORT) &&
    // MPIX_CUDA_AWARE_SUPPORT" to see if it is CUDA-aware. However, recent versions of IBM
    // Spectrum MPI (e.g., 10.3.1) on Summit meet the above conditions, but one has to use jsrun
    // --smpiargs=-gpu to really enable GPU-aware MPI. So we do the check at runtime with code
    // that works only with GPU-aware MPI.
    if (PetscUnlikely(!aware)) {
      PetscCall((*PetscErrorPrintf)("PETSc is configured with GPU support, but your MPI is not GPU-aware. For better performance, please use a GPU-aware MPI.\n"));
      PetscCall((*PetscErrorPrintf)("If you do not care, add option -use_gpu_aware_mpi 0. To not see the message again, add the option to your .petscrc, OR add it to the env var PETSC_OPTIONS.\n"));
      PetscCall((*PetscErrorPrintf)("If you do care, for IBM Spectrum MPI on OLCF Summit, you may need jsrun --smpiargs=-gpu.\n"));
      PetscCall((*PetscErrorPrintf)("For Open MPI, you need to configure it --with-cuda (https://www.open-mpi.org/faq/?category=buildcuda)\n"));
      PetscCall((*PetscErrorPrintf)("For MVAPICH2-GDR, you need to set MV2_USE_CUDA=1 (http://mvapich.cse.ohio-state.edu/userguide/gdr/)\n"));
      PetscCall((*PetscErrorPrintf)("For Cray-MPICH, you need to set MPICH_GPU_SUPPORT_ENABLED=1 (man mpi to see manual of cray-mpich)\n"));
      PETSCABORT(PETSC_COMM_SELF, PETSC_ERR_LIB);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::configure() noexcept
{
  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d being configured before it was initialized", id());
  // why on EARTH nvidia insists on making otherwise informational states into
  // fully-fledged error codes is beyond me. Why couldn't a pointer to bool argument have
  // sufficed?!?!?!
  if (cupmSetDevice(id_) != cupmErrorDeviceAlreadyInUse) PetscCallCUPM(cupmGetLastError());
  // need to update the device properties
  PetscCallCUPM(cupmGetDeviceProperties(&dprop_, id_));
  PetscDeviceCUPMRuntimeArch = dprop_.major * 10 + dprop_.minor; // e.g. compute capability 7.0 -> 70
  PetscCall(PetscInfo(nullptr, "Configured device %d\n", id_));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::view(PetscViewer viewer) const noexcept
{
  PetscBool isascii;

  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d being viewed before it was initialized or configured", id());
  // we don't print device-specific info in CI-mode
  if (PetscUnlikely(PetscCIEnabled)) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(PetscObjectTypeCompare(PetscObjectCast(viewer), PETSCVIEWERASCII, &isascii));
  if (isascii) {
    MPI_Comm    comm;
    PetscMPIInt rank;
    PetscViewer sviewer;

    int clock, memclock;
    PetscCallCUPM(cupmDeviceGetAttribute(&clock, cupmDevAttrClockRate, id_));
    PetscCallCUPM(cupmDeviceGetAttribute(&memclock, cupmDevAttrMemoryClockRate, id_));

    PetscCall(PetscObjectGetComm(PetscObjectCast(viewer), &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "[%d] name: %s\n", rank, dprop_.name));
    PetscCall(PetscViewerASCIIPushTab(sviewer));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Compute capability: %d.%d\n", dprop_.major, dprop_.minor));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Multiprocessor Count: %d\n", dprop_.multiProcessorCount));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Grid Dimensions: %d x %d x %d\n", dprop_.maxGridSize[0], dprop_.maxGridSize[1], dprop_.maxGridSize[2]));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Block Dimensions: %d x %d x %d\n", dprop_.maxThreadsDim[0], dprop_.maxThreadsDim[1], dprop_.maxThreadsDim[2]));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Maximum Threads Per Block: %d\n", dprop_.maxThreadsPerBlock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Warp Size: %d\n", dprop_.warpSize));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Total Global Memory (bytes): %zu\n", dprop_.totalGlobalMem));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Total Constant Memory (bytes): %zu\n", dprop_.totalConstMem));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Shared Memory Per Block (bytes): %zu\n", dprop_.sharedMemPerBlock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Multiprocessor Clock Rate (kHz): %d\n", clock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Memory Clock Rate (kHz): %d\n", memclock));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Memory Bus Width (bits): %d\n", dprop_.memoryBusWidth));
    // peak bandwidth = 2 (transfers per clock for DDR) * memory clock (kHz) * bus width (bytes); dividing by 1.0e6 converts kHz * bytes to GB/s
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Peak Memory Bandwidth (GB/s): %f\n", 2.0 * memclock * (dprop_.memoryBusWidth / 8) / 1.0e6));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Can map host memory: %s\n", dprop_.canMapHostMemory ? "PETSC_TRUE" : "PETSC_FALSE"));
    PetscCall(PetscViewerASCIIPrintf(sviewer, "Can execute multiple kernels concurrently: %s\n", dprop_.concurrentKernels ? "PETSC_TRUE" : "PETSC_FALSE"));
    PetscCall(PetscViewerASCIIPopTab(sviewer));
    PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::getattribute(PetscDeviceAttribute attr, void *value) const noexcept
{
  PetscFunctionBegin;
  PetscAssert(initialized(), PETSC_COMM_SELF, PETSC_ERR_COR, "Device %d was not initialized", id());
  switch (attr) {
  case PETSC_DEVICE_ATTR_SIZE_T_SHARED_MEM_PER_BLOCK:
    *static_cast<std::size_t *>(value) = prop().sharedMemPerBlock;
    break;
  case PETSC_DEVICE_ATTR_MAX:
    break;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::shutdown() noexcept
{
  PetscFunctionBegin;
  if (!initialized()) PetscFunctionReturn(PETSC_SUCCESS);
#if PetscDefined(HAVE_CUDA)
  PetscCallNVML(nvmlShutdown());
#endif
  devInitialized_ = false;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static std::jmp_buf cupmMPIAwareJumpBuffer;
static bool         cupmMPIAwareJumpBufferSet;

// godspeed to anyone that attempts to call this function
void SilenceVariableIsNotNeededAndWillNotBeEmittedWarning_ThisFunctionShouldNeverBeCalled()
{
  PETSCABORT(MPI_COMM_NULL, (PetscErrorCode)INT_MAX);
  if (cupmMPIAwareJumpBufferSet) (void)cupmMPIAwareJumpBuffer;
}

template <DeviceType T>
PetscErrorCode Device<T>::DeviceInternal::CUPMAwareMPI_(bool *awareness) noexcept
{
  constexpr int hbuf[]            = {1, 0};
  int          *dbuf              = nullptr;
  const auto    cupmSignalHandler = [](int signal, void *ptr) -> PetscErrorCode {
    if ((signal == SIGSEGV) && cupmMPIAwareJumpBufferSet) std::longjmp(cupmMPIAwareJumpBuffer, 1);
    return PetscSignalHandlerDefault(signal, ptr);
  };

  PetscFunctionBegin;
  *awareness = false;
  PetscCallCUPM(cupmMalloc(reinterpret_cast<void **>(&dbuf), sizeof(hbuf)));
  PetscCallCUPM(cupmMemcpy(dbuf, hbuf, sizeof(hbuf), cupmMemcpyHostToDevice));
  PetscCallCUPM(cupmDeviceSynchronize());
  PetscCall(PetscPushSignalHandler(cupmSignalHandler, nullptr));
  cupmMPIAwareJumpBufferSet = true;
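  // setjmp() returns 0 on this initial call; if the Allreduce below faults because a
  // non-GPU-aware MPI dereferences the device pointer, the SIGSEGV handler above longjmps
  // back here with a nonzero value, the assignment is skipped, and *awareness stays false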
  if (!setjmp(cupmMPIAwareJumpBuffer) && !MPI_Allreduce(dbuf, dbuf + 1, 1, MPI_INT, MPI_SUM, PETSC_COMM_SELF)) *awareness = true;
  cupmMPIAwareJumpBufferSet = false;
  PetscCall(PetscPopSignalHandler());
  PetscCallCUPM(cupmFree(dbuf));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::finalize_() noexcept
{
  PetscFunctionBegin;
  if (PetscUnlikely(!initialized_)) PetscFunctionReturn(PETSC_SUCCESS);
  for (auto &&device : devices_) {
    if (device) PetscCall(device->shutdown());
    device.reset();
  }
  defaultDevice_ = PETSC_CUPM_DEVICE_NONE; // disabled by default
  initialized_   = false;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PETSC_NODISCARD static PETSC_CONSTEXPR_14 const char *CUPM_VISIBLE_DEVICES() noexcept
{
  switch (T) {
  case DeviceType::CUDA:
    return "CUDA_VISIBLE_DEVICES";
  case DeviceType::HIP:
    return "HIP_VISIBLE_DEVICES";
  }
  PetscUnreachable();
  return "PETSC_ERROR_PLIB";
}

/*
  The default device ID is
  MPI     -- rank % number_local_devices
  PyTorch -- getenv("LOCAL_RANK")
*/
template <DeviceType T>
PetscErrorCode Device<T>::initialize(MPI_Comm comm, PetscInt *defaultDeviceId, PetscBool *defaultView, PetscDeviceInitType *defaultInitType) noexcept
{
  auto initId   = std::make_pair(*defaultDeviceId, PETSC_FALSE);
  auto initView = std::make_pair(*defaultView, PETSC_FALSE);
  auto initType = std::make_pair(*defaultInitType, PETSC_FALSE);
  int  ndev     = 0;

  PetscFunctionBegin;
  if (initialized_) PetscFunctionReturn(PETSC_SUCCESS);
  initialized_ = true;
  PetscCall(PetscRegisterFinalize(finalize_));
  PetscCall(base_type::PetscOptionDeviceAll(comm, initType, initId, initView));

  if (initType.first == PETSC_DEVICE_INIT_NONE) {
    initId.first = PETSC_CUPM_DEVICE_NONE;
  } else if (const auto cerr = cupmGetDeviceCount(&ndev)) {
    auto PETSC_UNUSED ignored = cupmGetLastError();

    PetscCheck((initType.first != PETSC_DEVICE_INIT_EAGER) && !initView.first, comm, PETSC_ERR_USER_INPUT, "Cannot eagerly initialize %s, as doing so results in %s error %d (%s) : %s", cupmName(), cupmName(), static_cast<PetscErrorCode>(cerr), cupmGetErrorName(cerr), cupmGetErrorString(cerr));
    // we won't be initializing anything anyways
    initType.first = PETSC_DEVICE_INIT_NONE;
    // save the error code for later
    initId.first = -static_cast<decltype(initId.first)>(cerr);
  }

  // check again for init type, since the device count may have changed it
  if (initType.first == PETSC_DEVICE_INIT_NONE) {
    // id < 0 (excluding PETSC_DECIDE) indicates an error has occurred during setup
    if ((initId.first > 0) || (initId.first == PETSC_DECIDE)) initId.first = PETSC_CUPM_DEVICE_NONE;
    // initType overrides initView
    initView.first = PETSC_FALSE;
  } else {
    PetscCall(PetscDeviceCheckDeviceCount_Internal(ndev));
    if (initId.first == PETSC_DECIDE) {
      if (ndev) {
        /* TORCHELASTIC_RUN_ID is used as a proxy to determine if the current process was launched with torchrun */
        char *pytorch_exists = (char *)getenv("TORCHELASTIC_RUN_ID");
        char *pytorch_rank   = (char *)getenv("LOCAL_RANK");

        if (pytorch_exists && pytorch_rank) {
          char *endptr;

          initId.first = (PetscInt)strtol(pytorch_rank, &endptr, 10);
          PetscCheck(initId.first < ndev, PETSC_COMM_SELF, PETSC_ERR_LIB, "PyTorch environment variable LOCAL_RANK %s >= number of devices %d", pytorch_rank, ndev);
        } else {
          PetscMPIInt rank;

          PetscCallMPI(MPI_Comm_rank(comm, &rank));
          // e.g. 4 ranks sharing 2 local devices: ranks 0 and 2 get device 0, ranks 1 and 3 get device 1
          initId.first = rank % ndev;
        }
      } else initId.first = 0;
    }
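    // a device view was requested; presumably there is nothing to view unless the devices are
    // actually set up, so force eager initialization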
    if (initView.first) initType.first = PETSC_DEVICE_INIT_EAGER;
  }

  static_assert(std::is_same<PetscMPIInt, decltype(defaultDevice_)>::value, "");
  // initId.first is PetscInt, defaultDevice_ is int
  PetscCall(PetscMPIIntCast(initId.first, &defaultDevice_));
  // record the results of the initialization
  *defaultDeviceId = initId.first;
  *defaultView     = initView.first;
  *defaultInitType = initType.first;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::init_device_id_(PetscInt *inid) const noexcept
{
  const auto id   = *inid == PETSC_DECIDE ? defaultDevice_ : (int)*inid;
  const auto cerr = static_cast<cupmError_t>(-defaultDevice_);

  PetscFunctionBegin;
  PetscCheck(defaultDevice_ != PETSC_CUPM_DEVICE_NONE, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Trying to retrieve a %s PetscDevice when it has been disabled", cupmName());
  PetscCheck(defaultDevice_ >= 0, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot lazily initialize PetscDevice: %s error %d (%s) : %s", cupmName(), static_cast<PetscErrorCode>(cerr), cupmGetErrorName(cerr), cupmGetErrorString(cerr));
  PetscAssert(static_cast<decltype(devices_.size())>(id) < devices_.size(), PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Only supports %zu devices but trying to get device with id %d", devices_.size(), id);

  if (!devices_[id]) devices_[id] = util::make_unique<DeviceInternal>(id);
  PetscAssert(id == devices_[id]->id(), PETSC_COMM_SELF, PETSC_ERR_PLIB, "Entry %d contains device with mismatching id %d", id, devices_[id]->id());
  PetscCall(devices_[id]->initialize());
  *inid = id;
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::configure_device_(PetscDevice device) noexcept
{
  PetscFunctionBegin;
  PetscCall(devices_[device->deviceId]->configure());
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::view_device_(PetscDevice device, PetscViewer viewer) noexcept
{
  PetscFunctionBegin;
  // now this __shouldn't__ reconfigure the device, but there is a PetscInfo call to indicate
  // it is being reconfigured
  PetscCall(devices_[device->deviceId]->configure());
  PetscCall(devices_[device->deviceId]->view(viewer));
  PetscFunctionReturn(PETSC_SUCCESS);
}

template <DeviceType T>
PetscErrorCode Device<T>::get_attribute_(PetscInt id, PetscDeviceAttribute attr, void *value) noexcept
{
  PetscFunctionBegin;
  PetscCall(devices_[id]->getattribute(attr, value));
  PetscFunctionReturn(PETSC_SUCCESS);
}

// explicitly instantiate the classes
#if PetscDefined(HAVE_CUDA)
template class Device<DeviceType::CUDA>;
#endif
#if PetscDefined(HAVE_HIP)
template class Device<DeviceType::HIP>;
#endif

} // namespace cupm

} // namespace device

} // namespace Petsc