#!/usr/bin/python3

# Use GNU compilers:
#
# Note cray-libsci provides BLAS etc. In summary, we have
# module use /soft/modulefiles
# module unload darshan
# module load PrgEnv-gnu cray-libsci nvhpc-mixed craype-accel-nvidia80 cudatoolkit-standalone/12.4.1
# export MPICH_GPU_SUPPORT_ENABLED=1
# export MPICH_GPU_IPC_ENABLED=0
#
# $ module list
# Currently Loaded Modules:
#   1) libfabric/1.15.2.0       7) nghttp2/1.57.0-ciat5hu         13) cray-mpich/8.1.28   19) cray-libsci/23.12.5
#   2) craype-network-ofi       8) curl/8.4.0-2ztev25             14) cray-pmi/6.1.13     20) nvhpc-mixed/23.9
#   3) perftools-base/23.12.0   9) cmake/3.27.7                   15) cray-pals/1.3.4     21) craype-accel-nvidia80
#   4) darshan/3.4.4           10) cudatoolkit-standalone/12.4.1  16) cray-libpals/1.3.4
#   5) gcc-native/12.3         11) craype/2.7.30                  17) craype-x86-milan
#   6) spack-pe-base/0.6.1     12) cray-dsmml/0.2.2               18) PrgEnv-gnu/8.5.0
#
# With the above, Cray-MPICH GPU-aware MPI works within a single node but still fails across multiple nodes. In the
# latter case, you can add the PETSc runtime option -use_gpu_aware_mpi 0 as a workaround.
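#
# For example, a multi-node run could be launched along these lines (a hedged sketch: the executable name,
# rank count, and ranks-per-node are placeholders, not from this file; Polaris uses the PALS mpiexec launcher):
# mpiexec -n 8 --ppn 4 ./your_petsc_app -use_gpu_aware_mpi 0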

if __name__ == '__main__':
  import sys
  import os
  sys.path.insert(0, os.path.abspath('config'))
  import configure
  configure_options = [
    '--with-cc=cc',
    '--with-cxx=CC',
    '--with-fc=ftn',
    '--with-debugging=0',
    '--with-cuda',
    '--with-cudac=nvcc',
    '--with-cuda-arch=80', # Since there is no easy way to auto-detect the cuda arch on the gpu-less Polaris login nodes, we explicitly set it.
    '--download-kokkos',
    '--download-kokkos-kernels',
    '--download-umpire',
    '--download-hypre',
  ]
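  # A quick way to double-check the --with-cuda-arch value above is to query the GPUs on a Polaris compute
  # node (a hedged sketch; the compute_cap query field needs a reasonably recent driver). A100 GPUs report
  # 8.0, which corresponds to --with-cuda-arch=80:
  #   nvidia-smi --query-gpu=compute_cap --format=csv,noheader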
  configure.petsc_configure(configure_options)
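
# A minimal sketch of how such an example script is typically used (assuming it is invoked from the top of
# a PETSc source tree, since it inserts the relative 'config' directory into sys.path):
#   cd $PETSC_DIR
#   ./config/examples/arch-alcf-polaris.py
# and then build with the "make ... all" command that configure prints when it finishes.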

# Use NVHPC compilers
#
# Unset CRAY_ACCEL_TARGET so that the Cray compiler wrappers won't add -gpu to nvc even when craype-accel-nvidia80 is loaded:
# unset CRAY_ACCEL_TARGET
# module load nvhpc/22.11 PrgEnv-nvhpc
#
# I ran into two problems with nvhpc and Kokkos (and Kokkos-Kernels) 4.2.0.
# 1) Kokkos-Kernels failed at configure time to find the TPLs cublas and cusparse from NVHPC.
#    As a workaround, I just load cudatoolkit-standalone/11.8.0 so that Kokkos-Kernels uses cublas and cusparse
#    from cudatoolkit-standalone; see the module sequence sketched after this note.
# 2) Kokkos-Kernels failed at compilation with
# "/home/jczhang/petsc/arch-kokkos-dbg/externalpackages/git.kokkos-kernels/batched/dense/impl/KokkosBatched_Gemm_Serial_Internal.hpp", line 94: error: expression must have a constant value
#     constexpr int nbAlgo = Algo::Gemm::Blocked::mb();
#                            ^
# "/home/jczhang/petsc/arch-kokkos-dbg/externalpackages/git.kokkos-kernels/blas/impl/KokkosBlas_util.hpp", line 58: note: cannot call non-constexpr function "__builtin_is_device_code" (declared implicitly)
#           KOKKOS_IF_ON_HOST((return 4;))
#           ^
#           detected during:
#
# It is a Kokkos-Kernels problem, and I have to wait for their fix.
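#
# A minimal sketch of the NVHPC environment setup combined with the workaround from (1) (module versions
# are the ones mentioned above; adjust them to whatever is currently installed on Polaris):
# unset CRAY_ACCEL_TARGET
# module load nvhpc/22.11 PrgEnv-nvhpc
# module load cudatoolkit-standalone/11.8.0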