xref: /petsc/config/BuildSystem/config/packages/MPI.py (revision 6c5693054f5123506dab0f5da2d352ed973d0e50)
1from __future__ import generators
2import config.base
3import config.package
4import os
5from stat import *
6
def noCheck(command, status, output, error):
  '''Status-check callback for executeShellCommand() that accepts any outcome (never raises).'''
  return None
10
11class Configure(config.package.Package):
12  def __init__(self, framework):
13    config.package.Package.__init__(self, framework)
14    self.minversion         = '2'
15    self.versionname        = 'MPI_VERSION'
16    self.functions          = ['MPI_Init', 'MPI_Comm_create']
17    self.includes           = ['mpi.h']
18    liblist_mpich         = [['fmpich2.lib','fmpich2g.lib','fmpich2s.lib','mpi.lib'],
19                             ['fmpich2.lib','fmpich2g.lib','mpi.lib'],['fmpich2.lib','mpich2.lib'],
20                             ['libfmpich2g.a','libmpi.a'],['libfmpich.a','libmpich.a', 'libpmpich.a'],
21                             ['libmpich.a', 'libpmpich.a'],
22                             ['libfmpich.a','libmpich.a', 'libpmpich.a', 'libmpich.a', 'libpmpich.a', 'libpmpich.a'],
23                             ['libmpich.a', 'libpmpich.a', 'libmpich.a', 'libpmpich.a', 'libpmpich.a'],
24                             ['libmpich.a','librt.a','libaio.a','libsnl.a','libpthread.a'],
25                             ['libmpich.a','libssl.a','libuuid.a','libpthread.a','librt.a','libdl.a'],
26                             ['libmpich.a','libnsl.a','libsocket.a','librt.a','libnsl.a','libsocket.a'],
27                             ['libmpich.a','libgm.a','libpthread.a']]
28    liblist_lam           = [['liblamf77mpi.a','libmpi++.a','libmpi.a','liblam.a'],
29                             ['liblammpi++.a','libmpi.a','liblam.a'],
30                             ['liblammpio.a','libpmpi.a','liblamf77mpi.a','libmpi.a','liblam.a'],
31                             ['liblammpio.a','libpmpi.a','liblamf90mpi.a','libmpi.a','liblam.a'],
32                             ['liblammpio.a','libpmpi.a','libmpi.a','liblam.a'],
33                             ['liblammpi++.a','libmpi.a','liblam.a'],
34                             ['libmpi.a','liblam.a']]
35    liblist_msmpi         = [[os.path.join('amd64','msmpifec.lib'),os.path.join('amd64','msmpi.lib')],
36                             [os.path.join('i386','msmpifec.lib'),os.path.join('i386','msmpi.lib')],
37                             [os.path.join('x64','msmpifec.lib'),os.path.join('x64','msmpi.lib')],
38                             [os.path.join('x86','msmpifec.lib'),os.path.join('x86','msmpi.lib')],
39                             ['msmpi.lib']]
40    liblist_other         = [['libmpich.a','libpthread.a'],['libmpi++.a','libmpi.a']]
41    liblist_single        = [['libmpi.a'],['libmpich.a'],['mpi.lib'],['mpich2.lib'],['mpich.lib'],
42                             [os.path.join('amd64','msmpi.lib')],[os.path.join('i386','msmpi.lib')],
43                             [os.path.join('x64','msmpi.lib')],[os.path.join('x86','msmpi.lib')]]
44    self.liblist          = liblist_mpich + liblist_lam + liblist_msmpi + liblist_other + liblist_single
45    # defaults to --with-mpi=yes
46    self.required         = 1
47    self.isPOE            = 0
48    self.usingMPIUni      = 0
49    self.shared           = 0
50    # local state
51    self.needBatchMPI     = 1
52    self.alternativedownload = 'mpich'
53    self.haveReduceLocal  = 0
54    # support MPI-3 process shared memory
55    self.support_mpi3_shm = 0
56    self.mpi_pkg_version  = ''
57    self.mpi_pkg          = '' # mpich,mpich2,mpich3,openmpi,intel,intel2,intel3
58
59    # mpiexec, absolute path, and sequential run
60    self.mpiexec           = None
61    self.mpiexecExecutable = None
62    self.mpiexecseq        = None
63    return
64
65  def setupHelp(self, help):
66    config.package.Package.setupHelp(self,help)
67    import nargs
68    help.addArgument('MPI', '-with-mpiexec=<prog>',                              nargs.Arg(None, None, 'The utility used to launch MPI jobs. (should support "-n <np>" option)'))
69    help.addArgument('MPI', '-with-mpiexec-tail=<prog>',                         nargs.Arg(None, None, 'The utility you want to put at the very end of "mpiexec -n <np> ..." and right before your executable to launch MPI jobs.'))
70    help.addArgument('MPI', '-with-mpi-compilers=<bool>',                        nargs.ArgBool(None, 1, 'Try to use the MPI compilers, e.g. mpicc'))
71    help.addArgument('MPI', '-known-mpi-shared-libraries=<bool>',                nargs.ArgBool(None, None, 'Indicates the MPI libraries are shared (the usual test will be skipped)'))
72    help.addArgument('MPI', '-with-mpi-ftn-module=<mpi or mpi_f08>',                      nargs.ArgString(None, "mpi", 'Specify the MPI Fortran module to build with'))
73    return
74
75  def setupDependencies(self, framework):
76    config.package.Package.setupDependencies(self, framework)
77    self.mpich   = framework.require('config.packages.MPICH', self)
78    self.openmpi = framework.require('config.packages.OpenMPI', self)
79    self.cuda    = framework.require('config.packages.CUDA',self)
80    self.hip     = framework.require('config.packages.HIP',self)
81    self.sycl    = framework.require('config.packages.SYCL',self)
82    self.odeps   = [self.cuda,self.hip,self.sycl]
83    return
84
85  def __str__(self):
86    output  = config.package.Package.__str__(self)
87    if self.mpiexec: output  += '  mpiexec: '+self.mpiexec+'\n'
88    if self.mpiexec_tail: output  += '  mpiexec_tail: '+self.mpiexec_tail+'\n'
89    if self.mpi_pkg: output  += '  Implementation: '+self.mpi_pkg+'\n'
90    if hasattr(self,'includepaths'):
91      output  += '  MPI C++ include paths: '+ self.includepaths+'\n'
92      output += '  MPI C++ libraries: '+ self.libpaths + ' ' + self.mpilibs+'\n'
93    return output+self.mpi_pkg_version
94
95  def generateLibList(self, directory):
96    if self.setCompilers.usedMPICompilers:
97      self.liblist = []
98      self.libDirs = ['']
99    return config.package.Package.generateLibList(self,directory)
100
101  # search many obscure locations for MPI
102  def getSearchDirectories(self):
103    import re
104    if self.mpich.found:
105      yield (self.mpich.installDir)
106      raise RuntimeError('--download-mpich libraries cannot be used')
107    if self.openmpi.found:
108      yield (self.openmpi.installDir)
109      raise RuntimeError('--download-openmpi libraries cannot be used')
110
111    yield ''
112    # Try configure package directories
113    dirExp = re.compile(r'mpi(ch)?(-.*)?')
114    for packageDir in self.argDB['with-packages-search-path']:
115      packageDir = os.path.abspath(packageDir)
116      if not os.path.isdir(packageDir):
117        raise RuntimeError('Invalid package directory: '+packageDir)
118      for f in os.listdir(packageDir):
119        dir = os.path.join(packageDir, f)
120        if not os.path.isdir(dir):
121          continue
122        if not dirExp.match(f):
123          continue
124        yield (dir)
125    # Try SUSE location
126    yield (os.path.abspath(os.path.join('/opt', 'mpich')))
127    # Try IBM
128    self.isPOE = 1
129    dir = os.path.abspath(os.path.join('/usr', 'lpp', 'ppe.poe'))
130    yield (os.path.abspath(os.path.join('/usr', 'lpp', 'ppe.poe')))
131    self.isPOE = 0
132    # Try /usr/local
133    yield (os.path.abspath(os.path.join('/usr', 'local')))
134    # Try /usr/local/*mpich*
135    if os.path.isdir(dir):
136      ls = os.listdir(dir)
137      for dir in ls:
138        if dir.find('mpich') >= 0:
139          dir = os.path.join('/usr','local',dir)
140          if os.path.isdir(dir):
141            yield (dir)
142    # Try ~/mpich*
143    homedir = os.getenv('HOME')
144    if homedir:
145      ls = os.listdir(homedir)
146      for dir in ls:
147        if dir.find('mpich') >= 0:
148          dir = os.path.join(homedir,dir)
149          if os.path.isdir(dir):
150            yield (dir)
151    # Try MSMPI/MPICH install locations under Windows
152    # ex: /cygdrive/c/Program Files/Microsoft HPC Pack 2008 SDK
153    for root in ['/',os.path.join('/','cygdrive')]:
154      for drive in ['c']:
155        for programFiles in ['Program Files','Program Files (x86)']:
156          for packageDir in ['Microsoft HPC Pack 2008 SDK','Microsoft Compute Cluster Pack','MPICH2','MPICH',os.path.join('MPICH','SDK.gcc'),os.path.join('MPICH','SDK')]:
157            yield(os.path.join(root,drive,programFiles,packageDir))
158    return
159
  def checkSharedLibrary_ThisIsBroken(self):
  # TODO: Fix this routine, currently
  #       * the visibility flag is passed to the compiling/linking so the symbols are not visible to the loader and the test fails (this is easily fixed)
  #       * even with that fixed the dlsym() is unable to locate the checkInit symbol in the library even though nm shows it is there; I am not sure the cause
    '''Sets flag indicating if MPI libraries are shared or not and
    determines if MPI libraries CANNOT be used by shared libraries'''
    # In batch mode the test executable cannot be run on this machine, so fall back on
    # the user-supplied --known-mpi-shared-libraries flag: warn when it is absent,
    # error out when it explicitly says the MPI library is static.
    if self.argDB['with-batch']:
      if self.argDB['with-shared-libraries']:
        if not 'known-mpi-shared-libraries' in self.argDB:
          self.logPrintWarning('Cannot verify that MPI is a shared library - in \
batch-mode! If MPI is a static library but linked into multiple shared \
libraries that the application uses, sometimes compiles work - \
but one might get run-time errors. If you know that the MPI library is \
shared - run with --known-mpi-shared-libraries=1 option to remove this \
warning message')
        elif not self.argDB['known-mpi-shared-libraries']:
          raise RuntimeError('Provided MPI library is flagged as static library! If its linked\n\
into multiple shared libraries that an application uses, sometimes\n\
compiles go through - but one might get run-time errors.  Either\n\
reconfigure PETSc with --with-shared-libraries=0 or provide MPI with\n\
shared libraries and run with --known-mpi-shared-libraries=1')
      return
    # Non-batch: run the real shared-library probe, launching the test with the
    # sequential mpiexec command (self.mpiexecseq)
    self.shared = self.libraries.checkShared('#include <mpi.h>\n','MPI_Init','MPI_Initialized','MPI_Finalize',checkLink = self.checkPackageLink,libraries = self.lib, defaultArg = 'known-mpi-shared-libraries', executor = self.mpiexecseq)

    # TODO: Turn this on once shared library checks are working again
    #if self.argDB['with-shared-libraries'] and not self.shared:
    #  self.logPrint('MPI libraries cannot be used with shared libraries')
    #  raise RuntimeError('Shared libraries cannot be built using MPI provided.\nEither reconfigure with --with-shared-libraries=0 or rebuild MPI with shared library support')
    return
189
190  def configureMPIEXEC_TAIL(self):
191    '''Checking for location of mpiexec_tail'''
192    if 'with-mpiexec-tail' in self.argDB:
193      self.argDB['with-mpiexec-tail'] = os.path.expanduser(self.argDB['with-mpiexec-tail'])
194      # If found, the call below defines a make macro MPIEXEC_TAIL with full path
195      if not self.getExecutable(self.argDB['with-mpiexec-tail'], getFullPath=1, resultName = 'mpiexec_tail'):
196        raise RuntimeError('Invalid mpiexec-tail specified: '+str(self.argDB['with-mpiexec-tail']))
197    else:
198      self.mpiexec_tail =''
199      self.addMakeMacro('MPIEXEC_TAIL', '')
200
201  def configureMPIEXEC(self):
202    '''Checking for location of mpiexec'''
203    mpiexecargs = ''
204    if self.argDB['with-batch']:
205      if 'with-mpiexec' in self.argDB:
206        self.logPrintBox('--with-mpiexec is ignored since --with-batch is provided; one cannot run generated executables on the compile server')
207      self.mpiexec = 'Not_appropriate_for_batch_systems_You_must_use_your_batch_system_to_submit_MPI_jobs_speak_with_your_local_sys_admin'
208      self.mpiexecseq = 'Not_appropriate_for_batch_systems_You_must_use_your_batch_system_to_submit_MPI_jobs_speak_with_your_local_sys_admin'
209      self.addMakeMacro('MPIEXEC', self.mpiexec)
210      return
211    if 'with-mpiexec' in self.argDB:
212      self.argDB['with-mpiexec'] = os.path.expanduser(self.argDB['with-mpiexec'])
213      if not self.getExecutable(self.argDB['with-mpiexec'], resultName = 'mpiexec'):
214        raise RuntimeError('Invalid mpiexec specified: '+str(self.argDB['with-mpiexec']))
215      self.mpiexec = self.argDB['with-mpiexec']
216    elif self.isPOE:
217      self.mpiexec = os.path.abspath(os.path.join('bin', 'mpiexec.poe'))
218    else:
219      mpiexecs = ['mpiexec', 'mpirun', 'mprun', 'srun']
220      path    = []
221      if 'with-mpi-dir' in self.argDB:
222        path.append(os.path.join(os.path.abspath(self.argDB['with-mpi-dir']), 'bin'))
223        # MPICH-NT-1.2.5 installs MPIRun.exe in mpich/mpd/bin
224        path.append(os.path.join(os.path.abspath(self.argDB['with-mpi-dir']), 'mpd','bin'))
225        useDefaultPath = 0
226      else:
227        for inc in self.include:
228          path.append(os.path.join(os.path.dirname(inc), 'bin'))
229          # MPICH-NT-1.2.5 installs MPIRun.exe in mpich/SDK/include/../../mpd/bin
230          path.append(os.path.join(os.path.dirname(os.path.dirname(inc)),'mpd','bin'))
231        for lib in self.lib:
232          path.append(os.path.join(os.path.dirname(os.path.dirname(lib)), 'bin'))
233        self.pushLanguage('C')
234        if (os.path.basename(self.getCompiler()) == 'mpicc' or os.path.basename(self.getCompiler()) == 'mpiicc'):
235          if os.path.dirname(self.getCompiler()):
236            path.append(os.path.dirname(self.getCompiler()))
237          else:
238            try:
239              (out,err,status) = config.base.Configure.executeShellCommand('which '+self.getCompiler())
240              if not status and not err:
241                path.append(os.path.dirname(out))
242            except:
243              pass
244        self.popLanguage()
245        useDefaultPath = 1
246      if not self.getExecutable(mpiexecs, path = path, useDefaultPath = useDefaultPath, resultName = 'mpiexec',setMakeMacro=0):
247        if not self.getExecutable('/bin/false', path = [], useDefaultPath = 0, resultName = 'mpiexec',setMakeMacro=0):
248          raise RuntimeError('Could not locate MPIEXEC - please specify --with-mpiexec option')
249      # Support for spaces and () in executable names; also needs to handle optional arguments at the end
250      # TODO: This support for spaces and () should be moved to core BuildSystem
251      self.mpiexec = self.mpiexec.replace(' ', r'\\ ').replace('(', r'\\(').replace(')', r'\\)').replace(r'\ -',' -')
252      if hasattr(self, 'ompi_major_version'):
253        if int(self.ompi_major_version) >= 5:
254          mpiexecargs += ' --oversubscribe' # alias to --map-by :OVERSUBSCRIBE
255        elif int(self.ompi_major_version) >= 3:
256          (out, err, ret) = Configure.executeShellCommand(self.mpiexec+' -help all', checkCommand = noCheck, timeout = 60, log = self.log, threads = 1)
257          if out.find('--oversubscribe') >=0:
258            mpiexecargs += ' --oversubscribe'
259
260    self.getExecutable(self.mpiexec, getFullPath=1, resultName='mpiexecExecutable',setMakeMacro=0)
261
262    if not 'with-mpiexec' in self.argDB and hasattr(self,'isNecMPI') and hasattr(self,'mpiexecExecutable'):
263      self.getExecutable('venumainfo', getFullPath=1, path = os.path.dirname(self.mpiexecExecutable))
264      if hasattr(self,'venumainfo'):
265        try:
266          (out, err, ret) = Configure.executeShellCommand(self.venumainfo + ' | grep "available"',timeout = 60, log = self.log, threads = 1)
267        except Exception as e:
268          self.logWrite('NEC utility venumainfo failed '+str(e)+'\n')
269        else:
270          try:
271            nve = len(out.split('\n'))
272            mpiexecargs += ' -nve ' + str(nve)
273          except Exception as e:
274            self.logWrite('Unable to parse the number of VEs from the NEC utility venumainfo\n'+str(e)+'\n')
275
276    # using mpiexec environmental variables make sure mpiexec matches the MPI libraries and save the variables for testing in PetscInitialize()
277    # the variable HAVE_MPIEXEC_ENVIRONMENTAL_VARIABLE is not currently used. PetscInitialize() can check the existence of the environmental variable to
278    # determine if the program has been started with the correct mpiexec (will only be set for parallel runs so not clear how to check appropriately)
279    (out, err, ret) = Configure.executeShellCommand(self.mpiexec+' -n 1 printenv | grep -v KEY', checkCommand = noCheck, timeout = 120, threads = 1, log = self.log)
280    if ret:
281      self.logWrite('Unable to run '+self.mpiexec+' with option "-n 1 printenv"\nThis could be ok, some MPI implementations such as SGI produce a non-zero status with non-MPI programs\n'+out+err)
282    else:
283      if out.find('MPIR_CVAR_CH3') > -1:
284        if hasattr(self,'ompi_major_version'): raise RuntimeError("Your libraries are from Open MPI but it appears your mpiexec is from MPICH");
285        self.addDefine('HAVE_MPIEXEC_ENVIRONMENTAL_VARIABLE', 'MPIR_CVAR_CH3')
286      elif  out.find('MPIR_CVAR_CH3') > -1:
287        if hasattr(self,'ompi_major_version'): raise RuntimeError("Your libraries are from Open MPI but it appears your mpiexec is from MPICH");
288        self.addDefine('HAVE_MPIEXEC_ENVIRONMENTAL_VARIABLE', 'MPICH')
289      elif out.find('OMPI_COMM_WORLD_SIZE') > -1:
290        if hasattr(self,'mpich_numversion'): raise RuntimeError("Your libraries are from MPICH but it appears your mpiexec is from Open MPI");
291        self.addDefine('HAVE_MPIEXEC_ENVIRONMENTAL_VARIABLE', 'OMP')
292    if hasattr(self,'isNecMPI'):
293      (out, err, ret) = Configure.executeShellCommand(self.mpiexec+' -n 1 -V /usr/bin/true', checkCommand = noCheck, timeout = 120, threads = 1, log = self.log)
294      if ret:
295        self.logWrite('Unable to run '+self.mpiexec+' with option "-n 1 -V /usr/bin/true"\n'+str(out)+'\n'+str(err))
296      else:
297        try:
298          necmpimajor = out.split(' ')[4].split('.')[0]
299          necmpiminor = out.split(' ')[4].split('.')[1]
300          self.addDefine('NECMPI_VERSION_MAJOR', necmpimajor)
301          self.addDefine('NECMPI_VERSION_MINOR', necmpiminor)
302        except Exception as e:
303            self.logWrite('Unable to parse the output of '+self.mpiexec+' with option "-n 1 -V /usr/bin/true"\n'+str(e)+'\n'+str(out)+'\n'+str(err))
304
305    # use full path if found
306    if self.mpiexecExecutable:
307      self.mpiexec = self.mpiexecExecutable
308    self.addMakeMacro('MPIEXEC', self.mpiexec + mpiexecargs)
309
310    # use sequential runs for testing
311    self.mpiexecseq = self.mpiexec + mpiexecargs + ' -n 1'
312
313    if hasattr(self,'mpich_numversion') or hasattr(self,'ompi_major_version'):
314
315      hostnameworks = 0
316      # turn of checks if Apple firewall is on since it prevents success of the tests even though MPI will work
317      self.getExecutable('socketfilterfw', path = ['/usr/libexec/ApplicationFirewall'])
318      if hasattr(self,'socketfilterfw'):
319        try:
320          (result, err, ret) = Configure.executeShellCommand(self.socketfilterfw + ' --getglobalstate', timeout = 60, log = self.log, threads = 1)
321          if result.find("Firewall is enabled") > -1:  hostnameworks = 1
322        except:
323          self.logPrint("Exception: Unable to get result from socketfilterfw\n")
324
325      self.getExecutable('hostname')
326      if not hostnameworks and hasattr(self,'hostname'):
327        try:
328          (hostname, err, ret) = Configure.executeShellCommand(self.hostname, timeout = 60, log = self.log, threads = 1)
329          self.logPrint("Return code from hostname: %s\n" % ret)
330        except:
331          self.logPrint("Exception: Unable to get result from hostname, skipping network checks\n")
332        else:
333          if ret == 0:
334            self.logPrint("Hostname works, running network checks")
335
336            self.getExecutable('ping', path = ['/sbin'], useDefaultPath = 1, setMakeMacro = 0)
337            if not hasattr(self,'ping'):
338              self.getExecutable('fping', resultName = 'ping', setMakeMacro = 0)
339            if hasattr(self,'ping'):
340              if self.setCompilers.isCygwin(self.log):
341                count = ' -n 2 '
342              else:
343                count = ' -c 2 '
344              try:
345                (ok, err, ret) = Configure.executeShellCommand(self.ping + count + hostname, timeout = 60, log = self.log, threads = 1)
346                self.logPrint("Return code from ping: %s\n" % ret)
347                if not ret: hostnameworks = 1
348              except:
349                self.logPrint("Exception: while running ping skipping ping check\n")
350
351              if not hostnameworks:
352                # Note: host may not work on macOS, this is normal
353                self.getExecutable('host', setMakeMacro = 0)
354                if hasattr(self,'host'):
355                  try:
356                    (ok, err, ret) = Configure.executeShellCommand(self.host + ' '+ hostname, timeout = 60, log = self.log, threads = 1)
357                    self.logPrint("Return code from host: %s\n" % ret)
358                    # host works even with broken VPN it is not a useful test
359                  except:
360                    self.logPrint("Exception: while running host skipping host check\n")
361
362              if not hostnameworks:
363                self.getExecutable('traceroute', path = ['/usr/sbin'], useDefaultPath = 1)
364                if hasattr(self,'traceroute'):
365                  try:
366                    (ok, err, ret) = Configure.executeShellCommand(self.traceroute + ' ' + hostname, timeout = 60, log = self.log, threads = 1)
367                    self.logPrint("Return code from traceroute: %s\n" % ret)
368                    if not ret: hostnameworks = 1
369                  except:
370                    self.logPrint("Exception: while running traceroute skipping traceroute check\n")
371
372              if not hostnameworks:
373                self.logPrintWarning('mpiexec may not work on your system due to network issues. \
374Perhaps you have VPN running whose network settings may not work with mpiexec or your network is misconfigured')
375          else:
376            self.logPrintWarning('mpiexec may not work on your system due to network issues. \
377Unable to run hostname to check the network')
378          self.logPrintDivider()
379
380    # check that mpiexec runs an MPI program correctly
381    error_message = 'Unable to run MPI program with '+self.mpiexecseq+'\n\
382    (1) make sure this is the correct program to run MPI jobs\n\
383    (2) your network may be misconfigured; see https://petsc.org/release/faq/#mpi-network-misconfigure\n\
384    (3) you may have VPN running whose network settings may not play nice with MPI\n'
385
386    includes = '#include <mpi.h>'
387    body = 'MPI_Init(0,0);\nMPI_Finalize();\n'
388    try:
389      ok = self.checkRun(includes, body, executor = self.mpiexecseq, timeout = 120, threads = 1)
390      if not ok: raise RuntimeError(error_message)
391    except RuntimeError as e:
392      if str(e).find('Runaway process exceeded time limit') > -1:
393        raise RuntimeError('Timeout: %s' % error_message)
394
  def configureMPI2(self):
    '''Check for functions added to the interface in MPI-2'''
    # Save compiler state and splice in the MPI include/library flags so the
    # checkLink() probes below compile and link against the MPI under test;
    # the original flags are restored at the end.
    oldFlags = self.compilers.CPPFLAGS
    oldLibs  = self.compilers.LIBS
    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
    self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
    self.framework.saveLog()
    # Check for some of the MPI functions PETSc needs from MPI-2.0/2.1. Generally speaking, PETSc requires MPI-2.1 with exception of MPI multithreading and one-sided.
    if not self.checkLink('#include <mpi.h>\n',
    '''
      int a,b,c,d,flag,sendbuf[1]={1},recvbuf[1]={2};
      MPI_Datatype newtype;
      if (MPI_Allreduce(MPI_IN_PLACE,0,1,MPI_INT,MPI_SUM,MPI_COMM_SELF)) return 0;
      if (MPI_Finalized(&flag)) return 0;
      if (MPI_Type_dup(MPI_INT,&newtype)) return 0;
      if (MPI_Exscan(sendbuf,recvbuf,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD)) return 0;
      if (MPI_Reduce_scatter(sendbuf,recvbuf,sendbuf,MPI_INT,MPI_SUM,MPI_COMM_WORLD)) return 0;
      if (MPI_Type_get_envelope(MPI_INT,&a,&b,&c,&d)) return 0;
    '''):
      raise RuntimeError('PETSc requires some of the MPI-2.0 (1997), MPI-2.1 (2008) functions - they are not available with the specified MPI library')

    # When an MPI-2 name is missing, map it onto the deprecated MPI-1 equivalent via a macro
    if not self.checkLink('#include <mpi.h>\n', 'int count=2; int blocklens[2]={0,1}; MPI_Aint indices[2]={0,1}; MPI_Datatype old_types[2]={MPI_INT,MPI_DOUBLE}; MPI_Datatype *newtype = 0;\n \
                                             if (MPI_Type_create_struct(count, blocklens, indices, old_types, newtype)) { }\n'):
      self.framework.addDefine('MPI_Type_create_struct(count,lens,displs,types,newtype)', 'MPI_Type_struct((count),(lens),(displs),(types),(newtype))')
    if not self.checkLink('#include <mpi.h>\n', 'MPI_Comm_errhandler_fn * p_err_fun = 0; MPI_Errhandler * p_errhandler = 0; if (MPI_Comm_create_errhandler(p_err_fun,p_errhandler)) { }\n'):
      self.framework.addDefine('MPI_Comm_create_errhandler(p_err_fun,p_errhandler)', 'MPI_Errhandler_create((p_err_fun),(p_errhandler))')
    if not self.checkLink('#include <mpi.h>\n', 'if (MPI_Comm_set_errhandler(MPI_COMM_WORLD,MPI_ERRORS_RETURN)) { }\n'):
      self.framework.addDefine('MPI_Comm_set_errhandler(comm,p_errhandler)', 'MPI_Errhandler_set((comm),(p_errhandler))')
    if self.checkLink('#include <mpi.h>\n', 'if (MPI_Reduce_local(0, 0, 0, MPI_INT, MPI_SUM)) { }\n'): # MPI_Reduce_local is in MPI-2.2
      self.haveReduceLocal = 1
      self.addDefine('HAVE_MPI_REDUCE_LOCAL',1)
    if self.checkLink('#include <mpi.h>\n', 'char version[MPI_MAX_LIBRARY_VERSION_STRING];int verlen;if (MPI_Get_library_version(version,&verlen)) { }\n'):
      self.addDefine('HAVE_MPI_GET_LIBRARY_VERSION', 1)
    # Even MPI_Win_create is in MPI 2.0, we do this test to suppress MPIUNI, which does not support MPI one-sided.
    if self.checkLink('#include <mpi.h>\n', 'int base[100]; MPI_Win win = 0; if (MPI_Win_create(base,100,4,MPI_INFO_NULL,MPI_COMM_WORLD,&win)) { }'):
      self.addDefine('HAVE_MPI_WIN_CREATE', 1)
    if not self.checkLink('#include <mpi.h>\n', 'int ptr[1] = {0}; MPI_Win win = 0; if (MPI_Accumulate(ptr,1,MPI_INT,0,0,1,MPI_INT,MPI_REPLACE,win)) { }'):
      raise RuntimeError('PETSc requires MPI_REPLACE (introduced in MPI-2.1 in 2008). Please update or switch to MPI that supports MPI_REPLACE. Let us know at petsc-maint@mcs.anl.gov if this is not possible')
    # flag broken one-sided tests
    # (MS-MPI, MPICH <= 3.0.4, and NEC MPI have known-broken one-sided implementations)
    if not 'HAVE_MSMPI' in self.defines and not (hasattr(self, 'mpich_numversion') and int(self.mpich_numversion) <= 30004300) and not (hasattr(self, 'isNecMPI')):
      self.addDefine('HAVE_MPI_ONE_SIDED', 1)

    if self.checkLink('#include <mpi.h>\n', 'int provided; if (MPI_Init_thread(0,0,MPI_THREAD_FUNNELED,&provided)) return 0;'): # MPI-2.1
      self.addDefine('HAVE_MPI_INIT_THREAD',1)

    # deadlock AO tests ex1 with test 3
    if (not hasattr(self, 'isNecMPI')) and self.checkLink('#include <mpi.h>\n',
    '''
     int sendbuf[2] = {1,2};
     int recvbuf[1];
     if (MPI_Reduce_scatter_block(sendbuf,recvbuf,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD)) return 0;
    '''):
      self.addDefine('HAVE_MPI_REDUCE_SCATTER_BLOCK',1) # MPI-2.2

    # restore the compiler state saved at the top
    self.compilers.CPPFLAGS = oldFlags
    self.compilers.LIBS = oldLibs
    self.logWrite(self.framework.restoreLog())
    return
453
  def configureMPI3(self):
    '''Check for functions added to the interface in MPI-3'''
    # Save compiler state and splice in the MPI include/library flags so the
    # checkLink() probes below use the MPI under test; restored at the end.
    oldFlags = self.compilers.CPPFLAGS
    oldLibs  = self.compilers.LIBS
    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
    self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
    self.framework.saveLog()
    # Skip buggy MPICH versions
    if (hasattr(self, 'mpich_numversion') and int(self.mpich_numversion) > 30004300) or not hasattr(self, 'mpich_numversion'):
      # MPI-3 process shared memory (windows shared within a node)
      if self.checkLink('#include <mpi.h>\n',
                      'MPI_Comm scomm; MPI_Aint size=128; int disp_unit=8,*baseptr; MPI_Win win;\n\
                       if (MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &scomm)) { }\n\
                       if (MPI_Win_allocate_shared(size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&baseptr,&win)) { }\n\
                       if (MPI_Win_shared_query(win,0,&size,&disp_unit,&baseptr)) { }\n'):
        self.addDefine('HAVE_MPI_PROCESS_SHARED_MEMORY', 1)
        self.support_mpi3_shm = 1
    # Dynamic windows: skipped for Intel MPI and Open MPI 4.1.x where the runtime checks fail
    if not any([
        hasattr(self, 'isIntelMPI'),
        hasattr(self, 'ompi_version') and (4,1,0) <= self.ompi_version < (4,2,0), # dynamic window tests fail unless using --mca btl vader
        ]) and self.checkLink('#include <mpi.h>\n',
                      'MPI_Aint size=128; int disp_unit=8,*baseptr; MPI_Win win;\n\
                       if (MPI_Win_allocate(size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&baseptr,&win)) { }\n\
                       if (MPI_Win_attach(win,baseptr,size)) { }\n\
                       if (MPI_Win_create_dynamic(MPI_INFO_NULL,MPI_COMM_WORLD,&win)) { }\n'):
      self.addDefine('HAVE_MPI_FEATURE_DYNAMIC_WINDOW', 1) # Use it to represent a group of MPI3 Win routines
    if self.checkCompile('#include <mpi.h>\n MPI_Count i = 0;\n'): # introduced in MPI-3.0
      self.addDefine('HAVE_MPI_COUNT',1) # we need it to support MPIPetsc_Type_get_envelope etc

    # Nonblocking collectives (MPI-3.0); the define is only set when all of them link
    if self.checkLink('#include <mpi.h>\n',
                      '''
                        int send=0,recv,counts[2]={1,1},displs[2]={1,2}; MPI_Request req;
                        if (MPI_Iscatter(&send,1,MPI_INT,&recv,1,MPI_INT,0,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Iscatterv(&send,counts,displs,MPI_INT,&recv,1,MPI_INT,0,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Igather(&send,1,MPI_INT,&recv,1,MPI_INT,0,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Igatherv(&send,1,MPI_INT,&recv,counts,displs,MPI_INT,0,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Iallgather(&send,1,MPI_INT,&recv,1,MPI_INT,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Iallgatherv(&send,1,MPI_INT,&recv,counts,displs,MPI_INT,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Ialltoall(&send,1,MPI_INT,&recv,1,MPI_INT,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Iallreduce(&send,&recv,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD,&req)) return 0;
                        if (MPI_Ibarrier(MPI_COMM_WORLD,&req)) return 0;
                      '''):
      self.addDefine('HAVE_MPI_NONBLOCKING_COLLECTIVES', 1)
    # Neighborhood collectives on distributed graph topologies (MPI-3.0)
    if self.checkLink('#include <mpi.h>\n',
                      'MPI_Comm distcomm; \n\
                       MPI_Request req; \n\
                       if (MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,0,0,MPI_WEIGHTS_EMPTY,0,0,MPI_WEIGHTS_EMPTY,MPI_INFO_NULL,0,&distcomm)) { }\n\
                       if (MPI_Neighbor_alltoallv(0,0,0,MPI_INT,0,0,0,MPI_INT,distcomm)) { }\n\
                       if (MPI_Ineighbor_alltoallv(0,0,0,MPI_INT,0,0,0,MPI_INT,distcomm,&req)) { }\n'):
      self.addDefine('HAVE_MPI_NEIGHBORHOOD_COLLECTIVES',1)

    # One-sided additions (MPI-3.0)
    if self.checkLink('#include <mpi.h>\n', 'int ptr[1] = {0}; MPI_Win win = 0; if (MPI_Get_accumulate(ptr,1,MPI_INT,ptr,1,MPI_INT,0,0,1,MPI_INT,MPI_SUM,win)) { }\n'):
      self.addDefine('HAVE_MPI_GET_ACCUMULATE', 1)
    if self.checkLink('#include <mpi.h>\n', 'int ptr[1]; MPI_Win win = 0; MPI_Request req; if (MPI_Rget(ptr,1,MPI_INT,0,1,1,MPI_INT,win,&req)) { }\n'):
      self.addDefine('HAVE_MPI_RGET', 1)
    # restore the compiler state saved at the top
    self.compilers.CPPFLAGS = oldFlags
    self.compilers.LIBS = oldLibs
    self.logWrite(self.framework.restoreLog())
    return
512
  def configureMPI4(self):
    '''Check for functions added to the interface in MPI-4
       - defines HAVE_MPI_LARGE_COUNT when the 64-bit-count ("_c" suffixed) routines link
       - defines HAVE_MPI_PERSISTENT_NEIGHBORHOOD_COLLECTIVES when MPI_Neighbor_alltoallv_init links'''
    # Save compiler state and point the preprocessor/linker at the MPI installation
    # for the duration of the link tests below; restored before returning.
    oldFlags = self.compilers.CPPFLAGS
    oldLibs  = self.compilers.LIBS
    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
    self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
    self.framework.saveLog()

    # Single combined link test: all of the MPI_Count ("_c") variants used by PETSc
    # must be present for HAVE_MPI_LARGE_COUNT to be defined.
    # MPI_Reduce_local_c is appended only when self.haveReduceLocal is set
    # (presumably by an earlier MPI feature test — confirm in configureMPI2).
    if self.checkLink('#include <mpi.h>\n',
    '''
      int          buf[1]={0},dest=1,source=1,tag=0, combiner, ints[1], rbuf[1] = {0};
      MPI_Count    count=1, nints, naddrs, ncounts, ntypes, counts[1]={0};
      MPI_Request  req;
      MPI_Status   stat;
      MPI_Aint     addrs[1]={0};
      MPI_Datatype types[1];

      if (MPI_Scatterv_c(buf,counts,addrs,MPI_INT,rbuf,count,MPI_INT,0,MPI_COMM_WORLD)) return 1;
      if (MPI_Gatherv_c(buf,count,MPI_INT,rbuf,counts,addrs,MPI_INT,0,MPI_COMM_WORLD)) return 1;
      if (MPI_Send_c(buf,count,MPI_INT,dest,tag,MPI_COMM_WORLD)) return 1;
      if (MPI_Send_init_c(buf,count,MPI_INT,dest,tag,MPI_COMM_WORLD,&req)) return 1;
      if (MPI_Isend_c(buf,count,MPI_INT,dest,tag,MPI_COMM_WORLD,&req)) return 1;
      if (MPI_Recv_c(buf,count,MPI_INT,source,tag,MPI_COMM_WORLD,&stat)) return 1;
      if (MPI_Recv_init_c(buf,count,MPI_INT,source,tag,MPI_COMM_WORLD,&req)) return 1;
      if (MPI_Irecv_c(buf,count,MPI_INT,source,tag,MPI_COMM_WORLD,&req)) return 1;
      if (MPI_Neighbor_alltoallv_c(0,0,0,MPI_INT,0,0,0,MPI_INT,MPI_COMM_WORLD)) return 1;
      if (MPI_Ineighbor_alltoallv_c(0,0,0,MPI_INT,0,0,0,MPI_INT,MPI_COMM_WORLD,&req)) return 1;
      if (MPI_Type_get_envelope_c(MPI_INT,&nints,&naddrs,&ncounts,&ntypes,&combiner)) return 1;
      if (MPI_Type_get_contents_c(MPI_INT,nints,naddrs,ncounts,ntypes,ints,addrs,counts,types)) return 1;
    ''' + ('if (MPI_Reduce_local_c(0,0,0,MPI_INT,MPI_SUM)) return 1;\n' if self.haveReduceLocal == 1 else '')):
      self.addDefine('HAVE_MPI_LARGE_COUNT', 1)

    # Persistent neighborhood collectives (MPI-4): link test for MPI_Neighbor_alltoallv_init
    if self.checkLink('#include <mpi.h>\n',
    '''
      MPI_Request req;
      MPI_Info    info = 0;
      if (MPI_Neighbor_alltoallv_init(0,0,0,MPI_INT,0,0,0,MPI_INT,MPI_COMM_WORLD,info,&req)) return 1;
    '''):
      self.addDefine('HAVE_MPI_PERSISTENT_NEIGHBORHOOD_COLLECTIVES', 1)

    # Restore the compiler state saved above
    self.compilers.CPPFLAGS = oldFlags
    self.compilers.LIBS = oldLibs
    self.logWrite(self.framework.restoreLog())
    return
557
  def configureMPIX(self):
    '''Check for experimental functions added by MPICH or Open MPI as MPIX
       - defines HAVE_MPIX_STREAM when the MPIX stream/enqueue API links
       - defines HAVE_MPIX_THREADCOMM when the MPIX threadcomm API links
       Only attempted for MPICH >= 4.2.0 (see note below).'''
    # mpich-4.2 has a bug fix (PR6454). Without it, we could not use MPIX stream
    if (hasattr(self, 'mpich_numversion') and int(self.mpich_numversion) >= 40200000):
      # Save compiler state; the MPI include path and libraries are appended
      # only for the duration of these link checks and restored at the end.
      oldFlags = self.compilers.CPPFLAGS
      oldLibs  = self.compilers.LIBS
      self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
      self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
      self.framework.saveLog()
      # MPIX stream support: create a stream-attached communicator and enqueue
      # send/recv/allreduce/wait operations on it (link test only; an int stands
      # in for cudaStream_t so the check does not depend on CUDA)
      if self.checkLink('#include <mpi.h>\n',
      '''
        MPI_Info    info ;
        // cudaStream_t stream;
        int         stream; // use a fake type instead as we don't want this check to depend on CUDA
        MPI_Comm    stream_comm ;
        MPIX_Stream mpi_stream ;
        MPI_Request req;
        MPI_Status  stat;
        int         sbuf[1]={0},rbuf[1]={0},count=1,dest=1,source=0,tag=0;

        MPI_Info_create (&info);
        MPI_Info_set(info, "type", "cudaStream_t");
        MPIX_Info_set_hex(info, "value", &stream, sizeof(stream));
        MPIX_Stream_create(info, &mpi_stream );
        MPIX_Stream_comm_create(MPI_COMM_WORLD, mpi_stream, &stream_comm);
        MPIX_Isend_enqueue(sbuf,count,MPI_INT,dest,tag,stream_comm,&req);
        MPIX_Irecv_enqueue(rbuf,count,MPI_INT,source,tag,stream_comm,&req);
        MPIX_Allreduce_enqueue(sbuf,rbuf,count,MPI_INT,MPI_SUM,stream_comm);
        MPIX_Wait_enqueue(&req, &stat);
      '''):
        self.addDefine('HAVE_MPIX_STREAM', 1)

      # MPIX threadcomm support (link test only; the communicator value is a placeholder)
      if self.checkLink('#include <mpi.h>\n',
      '''
        MPI_Comm comm = MPI_COMM_WORLD; // fake
        MPIX_Threadcomm_start(comm);
        MPIX_Threadcomm_finish(comm);
        MPIX_Threadcomm_free(&comm);
      '''):
        self.addDefine('HAVE_MPIX_THREADCOMM', 1)

      # Restore the compiler state saved above
      self.compilers.CPPFLAGS = oldFlags
      self.compilers.LIBS = oldLibs
      self.logWrite(self.framework.restoreLog())
    return
603
604  def configureMPIGPUAware(self):
605    '''Check if the MPI supports GPUs. If yes, define HAVE_MPI_GPU_AWARE, otherwise set testoptions to not rely on GPU-aware MPI.'''
606    import re
607    gpu_aware = 0
608    if hasattr(self, 'ompi_major_version') and hasattr(self, 'mpiexec'):
609      # https://docs.open-mpi.org/en/main/tuning-apps/accelerators/rocm.html#checking-that-open-mpi-has-been-built-with-rocm-support
610      # Check if ompi_info prints lines like "MPI extensions: affinity, cuda, ftmpi, rocm"
611      try:
612        ompi_info = os.path.join(os.path.dirname(self.mpiexec), 'ompi_info') # ompi_info should be in the same directory as mpiexec/mpirun
613        (out, err, status) = Configure.executeShellCommand(ompi_info, timeout = 60, log = self.log, threads = 1)
614        if not status and not err:
615          pattern = re.compile(r'^.*MPI extensions:.*\b(cuda|rocm)\b.*$', re.MULTILINE)
616          if pattern.search(out): gpu_aware = 1
617      except:
618        pass
619    elif hasattr(self, 'mpich_numversion'):
620      if (self.cuda.found and self.libraries.check(self.dlib, 'yaksuri_cudai_unpack_wchar_t')) or (self.hip.found and self.libraries.check(self.dlib, 'yaksuri_hipi_unpack_wchar_t')): gpu_aware = 1
621    if gpu_aware:
622      self.addDefine('HAVE_MPI_GPU_AWARE', 1)
623    else:
624      self.log.write('We find the MPI was not configured with GPU support. Add "-use_gpu_aware_mpi 0" to PETSc CI test.\n')
625      self.testoptions = '-use_gpu_aware_mpi 0'
626    return
627
628  def configureMPITypes(self):
629    '''Checking for MPI Datatype handles'''
630    oldFlags = self.compilers.CPPFLAGS
631    oldLibs  = self.compilers.LIBS
632    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
633    self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
634    mpitypes = [('MPI_LONG_DOUBLE', 'long-double'), ('MPI_INT64_T', 'int64_t')]
635    for datatype, name in mpitypes:
636      includes = '#include <stdlib.h>\n#include <mpi.h>\n'
637      body     = 'int size;\nint ierr;\nMPI_Init(0,0);\nierr = MPI_Type_size('+datatype+', &size);\nif(ierr || (size == 0)) exit(1);\nMPI_Finalize();\n'
638      if self.checkCompile(includes, body):
639        self.addDefine('HAVE_'+datatype, 1)
640    self.compilers.CPPFLAGS = oldFlags
641    self.compilers.LIBS = oldLibs
642    return
643
  def alternateConfigureLibrary(self):
    '''Setup MPIUNI, our uniprocessor version of MPI'''
    self.addDefine('HAVE_MPIUNI', 1)
    self.addDefine('HAVE_MPI_REDUCE_LOCAL',1) # One can add these MPI macros on demand for MPIUNI
    self.addDefine('HAVE_MPI_LARGE_COUNT', 1)
    self.addDefine('HAVE_MPI_COUNT',1)
    self.addMakeMacro('MPI_IS_MPIUNI', 1)
    self.framework.packages.append(self)
    # Both launchers point at the MPIUNI wrapper script since only one rank can run
    self.mpiexec = '${PETSC_DIR}/lib/petsc/bin/petsc-mpiexec.uni'
    self.mpiexecseq = '${PETSC_DIR}/lib/petsc/bin/petsc-mpiexec.uni'
    self.addMakeMacro('MPIEXEC','${PETSC_DIR}/lib/petsc/bin/petsc-mpiexec.uni')
    self.executeTest(self.configureMPIEXEC_TAIL)
    self.framework.saveLog()
    self.logWrite(self.framework.restoreLog())
    # Mark the package found so the rest of configure treats MPIUNI as a working MPI
    self.usingMPIUni = 1
    self.found = 1
    self.version = 'PETSc MPIUNI uniprocessor MPI replacement'
    # Verify no stale mpi.h from a previous (real) MPI build lingers in PETSC_ARCH/include
    self.executeTest(self.PetscArchMPICheck)
    # MPIUNI provides no Fortran mpi_f08 module
    self.mpi_f08 = False
    return
664
665  def checkDownload(self):
666    '''Check if we should download MPICH or Open MPI'''
667    if 'download-mpi' in self.argDB and self.argDB['download-mpi']:
668      raise RuntimeError('Option --download-mpi does not exist! Use --download-mpich or --download-openmpi instead.')
669    if self.argDB['download-mpich'] and self.argDB['download-openmpi']:
670      raise RuntimeError('Cannot install more than one of Open MPI or  MPICH for a single configuration. \nUse different PETSC_ARCH if you want to be able to switch between two')
671    return None
672
673  def SGIMPICheck(self):
674    '''Returns true if SGI MPI is used'''
675    if self.libraries.check(self.lib, 'MPI_SGI_barrier') :
676      self.logPrint('SGI MPI detected - defining MISSING_SIGTERM')
677      self.addDefine('MISSING_SIGTERM', 1)
678      return 1
679    else:
680      self.logPrint('SGI MPI test failure')
681      return 0
682
683  def CxxMPICheck(self):
684    '''Make sure C++ can compile and link'''
685    if not hasattr(self.compilers, 'CXX'):
686      return 0
687    self.libraries.pushLanguage('Cxx')
688    oldFlags = self.compilers.CXXPPFLAGS
689    self.compilers.CXXPPFLAGS += ' '+self.headers.toString(self.include)
690    self.log.write('Checking for header mpi.h\n')
691    # check if MPI_Finalize from c++ exists
692    self.log.write('Checking for C++ MPI_Finalize()\n')
693    if not self.libraries.check(self.lib, 'MPI_Finalize', prototype = '#include <mpi.h>', call = 'int ierr;\nierr = MPI_Finalize();\n(void)ierr', cxxMangle = 1):
694      raise RuntimeError('C++ error! MPI_Finalize() could not be located!')
695    self.compilers.CXXPPFLAGS = oldFlags
696    self.libraries.popLanguage()
697    return
698
699  def FortranMPICheck(self):
700    '''Make sure fortran include [mpif.h] and library symbols are found'''
701    if not hasattr(self.compilers, 'FC'):
702      return 0
703    # Fortran compiler is being used - so make sure mpif.h exists
704    self.libraries.pushLanguage('FC')
705    oldFlags = self.compilers.FPPFLAGS
706    self.compilers.FPPFLAGS += ' '+self.headers.toString(self.include)
707    # check if mpi_init form fortran works
708    self.log.write('Checking for fortran mpi_init()\n')
709    if not self.libraries.check(self.lib,'', call = '#include "mpif.h"\n       integer ierr\n       call mpi_init(ierr)'):
710      raise RuntimeError('Fortran error! mpi_init() could not be located!')
711    self.compilers.FPPFLAGS = oldFlags
712    self.libraries.popLanguage()
713    return 0
714
  def configureIO(self):
    '''Check for the functions in MPI/IO
       - Define HAVE_MPIIO if they are present
       - Some older MPI 1 implementations are missing these'''
    # MSWIN has buggy MPI IO
    if 'HAVE_MSMPI' in self.defines: return
    # Save compiler state and point it at the MPI headers/libraries for the link test;
    # restored on both the failure and success paths below.
    oldFlags = self.compilers.CPPFLAGS
    oldLibs  = self.compilers.LIBS
    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
    self.compilers.LIBS = self.libraries.toString(self.lib)+' '+self.compilers.LIBS
    # All of the MPI-IO routines used by PETSc must link in one test program,
    # otherwise HAVE_MPIIO is not defined
    if not self.checkLink('#include <mpi.h>\n', '''MPI_Aint lb, extent;\nif (MPI_Type_get_extent(MPI_INT, &lb, &extent)) { }\n
                                                 MPI_File fh = 0;\nvoid *buf = 0;\nMPI_Status status;\nif (MPI_File_write_all(fh, buf, 1, MPI_INT, &status)) { }\n
                                                 if (MPI_File_read_all(fh, buf, 1, MPI_INT, &status)) { }\n
                                                 MPI_Offset disp = 0;\nMPI_Info info = 0;\nif (MPI_File_set_view(fh, disp, MPI_INT, MPI_INT, "", info)) { }\n
                                                 if (MPI_File_open(MPI_COMM_SELF, "", 0, info, &fh)) { }\n
                                                 if (MPI_File_close(&fh)) { }\n'''):
      self.compilers.LIBS = oldLibs
      self.compilers.CPPFLAGS = oldFlags
      return
    self.addDefine('HAVE_MPIIO', 1)
    self.compilers.LIBS = oldLibs
    self.compilers.CPPFLAGS = oldFlags
    return
738
739  def checkMPIDistro(self):
740    '''Determine if MPICH_NUMVERSION, OMPI_MAJOR_VERSION or MSMPI_VER exist in mpi.h
741       Used for consistency checking of MPI installation at compile time'''
742    import re
743    HASHLINESPACE = ' *(?:\n#.*\n *)*'
744    oldFlags = self.compilers.CPPFLAGS
745    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
746
747    # the following packages are all derived originally from the MPICH implementation
748    MPI_VER = ''
749    # I_MPI_NUMVERSION is broken on Windows and only has a value of 0 so test also for the named version
750    MPICHPKG = 'I_MPI'
751    mpich_test = '#include <mpi.h>\nconst char *mpich_ver = '+MPICHPKG+'_VERSION;\n'
752    if self.checkCompile(mpich_test):
753      buf = self.outputPreprocess(mpich_test)
754      try:
755        mpich_numversion = re.compile('\nconst char *mpich_ver ='+HASHLINESPACE+r'"([\.0-9]+)"'+HASHLINESPACE+';').search(buf).group(1)
756        self.addDefine('HAVE_'+MPICHPKG+'_VERSION',mpich_numversion)
757        MPI_VER  = '  '+MPICHPKG+'_VERSION: '+mpich_numversion
758      except:
759        self.logPrint('Unable to parse '+MPICHPKG+' version from header. Probably a buggy preprocessor')
760    for MPICHPKG in ['MPICH', 'I_MPI', 'MVAPICH2']:
761      mpich_test = '#include <mpi.h>\nint mpich_ver = '+MPICHPKG+'_NUMVERSION;\n'
762      if self.checkCompile(mpich_test):
763        buf = self.outputPreprocess(mpich_test)
764        try:
765          mpich_numversion = re.compile('\nint mpich_ver ='+HASHLINESPACE+'([0-9]+)'+HASHLINESPACE+';').search(buf).group(1)
766          MPI_VER += '  '+MPICHPKG+'_NUMVERSION: '+mpich_numversion
767          self.addDefine('HAVE_'+MPICHPKG, 1)
768          # for I_MPI and MVAPICH2, we can not use petscpkg_version.h since they are not a PETSc package yet.
769          # Anyway, we use PETSC_PKG_'MPICHPKG'_NUMVERSION to record the config time version for later compile time checking.
770          self.addDefine('PKG_'+MPICHPKG+'_NUMVERSION',mpich_numversion)
771          if MPICHPKG == 'MPICH':
772            self.mpich_numversion = mpich_numversion
773            MAJ = int(mpich_numversion)//10000000  # See comments in MPICH.py
774            MIN = int(mpich_numversion)//100000%100
775            REV = int(mpich_numversion)//1000%100
776            self.mpich.version_tuple = (MAJ, MIN, REV) # version_tuple makes mpich included in petscpkg_version.h
777          elif MPICHPKG == 'I_MPI': self.isIntelMPI = 1
778        except:
779          self.logPrint('Unable to parse '+MPICHPKG+' version from header. Probably a buggy preprocessor')
780    if MPI_VER:
781      self.compilers.CPPFLAGS = oldFlags
782      self.mpi_pkg_version = MPI_VER+'\n'
783      self.mpi_pkg = 'mpich'+mpich_numversion[0]
784      return
785
786    # NEC MPI is derived from MPICH but it does not keep MPICH related NUMVERSION
787    necmpi_test = '#include <mpi.h>\nMPI_NEC_Function f = MPI_NEC_FUNCTION_NULL;\n'
788    if self.checkCompile(necmpi_test):
789      self.isNecMPI = 1
790      self.addDefine('HAVE_NECMPI',1)
791
792    # IBM Spectrum MPI is derived from Open MPI, we do not yet have specific tests for it
793    # https://www.ibm.com/us-en/marketplace/spectrum-mpi
794    openmpi_test = '#include <mpi.h>\nint ompi_major = OMPI_MAJOR_VERSION;\nint ompi_minor = OMPI_MINOR_VERSION;\nint ompi_release = OMPI_RELEASE_VERSION;\n'
795    if self.checkCompile(openmpi_test):
796      buf = self.outputPreprocess(openmpi_test)
797      ompi_major_version = ompi_minor_version = ompi_release_version = 'unknown'
798      try:
799        ompi_major_version = re.compile('\nint ompi_major ='+HASHLINESPACE+'([0-9]+)'+HASHLINESPACE+';').search(buf).group(1)
800        ompi_minor_version = re.compile('\nint ompi_minor ='+HASHLINESPACE+'([0-9]+)'+HASHLINESPACE+';').search(buf).group(1)
801        ompi_release_version = re.compile('\nint ompi_release ='+HASHLINESPACE+'([0-9]+)'+HASHLINESPACE+';').search(buf).group(1)
802        self.ompi_major_version = ompi_major_version
803        self.ompi_version = tuple([int(i) for i in [ompi_major_version,ompi_minor_version,ompi_release_version]])
804        self.openmpi.version_tuple = self.ompi_version # version_tuple makes openmpi included by petscpkg_version.h, so one can uses macros defined there
805        self.addDefine('HAVE_OPENMPI', 1) # we have openmpi, though it is not necessarily installed by --download-openmpi
806        self.mpi_pkg_version = '  OMPI_VERSION: '+ompi_major_version+'.'+ompi_minor_version+'.'+ompi_release_version+'\n'
807        MPI_VER = '  OMPI_VERSION: '+ompi_major_version+'.'+ompi_minor_version+'.'+ompi_release_version
808      except:
809        self.logPrint('Unable to parse Open MPI version from header. Probably a buggy preprocessor')
810    if MPI_VER:
811      self.compilers.CPPFLAGS = oldFlags
812      self.mpi_pkg_version = MPI_VER+'\n'
813      self.mpi_pkg = 'openmpi'
814      return
815
816    msmpi_test = '#include <mpi.h>\n#define xstr(s) str(s)\n#define str(s) #s\n#if defined(MSMPI_VER)\nchar msmpi_hex[] = xstr(MSMPI_VER);\n#else\n#error not MSMPI\n#endif\n'
817    if self.checkCompile(msmpi_test):
818      buf = self.outputPreprocess(msmpi_test)
819      msmpi_version = 'unknown'
820      self.addDefine('HAVE_MSMPI',1) # flag we have MSMPI since we need to disable broken components
821      try:
822        msmpi_version = re.compile('\n'+r'char msmpi_hex\[\] = '+HASHLINESPACE+'\"([a-zA-Z0-9_]*)\"'+HASHLINESPACE+';').search(buf).group(1)
823        MPI_VER = '  MSMPI_VERSION: '+msmpi_version
824        self.addDefine('HAVE_MSMPI_VERSION',msmpi_version)
825      except:
826        self.logPrint('Unable to parse MSMPI version from header. Probably a buggy preprocessor')
827    if MPI_VER:
828      self.compilers.CPPFLAGS = oldFlags
829      self.mpi_pkg_version = MPI_VER+'\n'
830      return
831
832    return
833
834  def findMPIIncludeAndLib(self):
835    '''Find MPI include paths and libraries from "mpicc -show" or Cray "cc --cray-print-opts=cflags/libs" and save.'''
836    '''When the underlying C++ compiler used by CUDA or HIP is not the same'''
837    '''as the MPICXX compiler (if any), the includes are needed for compiling with'''
838    '''the CUDA or HIP compiler or the Kokkos compiler, and the libraries are needed'''
839    '''when the Kokkos compiler wrapper is linking a Kokkos application.'''
840    needed=False
841    if hasattr(self.compilers, 'CUDAC') and self.cuda.found: needed = True
842    if hasattr(self.compilers, 'HIPC') and self.hip.found: needed = True
843    if hasattr(self.compilers, 'SYCLC') and self.sycl.found: needed = True
844    if not needed: return
845
846    if 'with-mpi-include' in self.argDB and 'with-mpi-lib' in self.argDB:
847      self.includepaths = self.headers.toString(self.argDB['with-mpi-include'])
848      self.mpilibs = self.libraries.toString(self.argDB['with-mpi-lib'])
849      self.libpaths = ''
850    else:
851      import re
852      cflagsOutput = ''
853      libsOutput   = ''
854      if config.setCompilers.Configure.isCrayPEWrapper(self.setCompilers.CC, self.log):
855        # check these two env vars to only query MPICH headers and libs. Cray PE may include other libs.
856        var1 = os.environ.get('PE_PKGCONFIG_LIBS').split(':') # the env var is in a format like 'mpich:libsci_mpi:libsci:dsmml'
857        var2 = os.environ.get('PE_PKGCONFIG_PRODUCTS').split(':')
858        env  = None # None means to inherit the current process' environment
859        if ('mpich' in var1 and 'PE_MPICH' in var2): # assume the two env vars appear together if any one is set
860          env = dict(os.environ, PE_PKGCONFIG_LIBS='mpich', PE_PKGCONFIG_PRODUCTS='PE_MPICH') # modify the two env vars only
861
862        cflagsOutput = self.executeShellCommand([self.compilers.CC, '--cray-print-opts=cflags'], env=env, log = self.log)[0]
863        # --no-as-needed since we always need MPI
864        libsOutput   = self.executeShellCommand([self.compilers.CC, '--no-as-needed', '--cray-print-opts=libs'], env=env, log = self.log)[0]
865      else:
866        cflagsOutput = self.executeShellCommand(self.compilers.CC + ' -show', log = self.log)[0] # not list since CC might be 'mpicc -cc=clang'
867        libsOutput   = cflagsOutput # same output as -show
868
869      # find include paths
870      self.includepaths = ''
871      argIter = iter(cflagsOutput.split())
872      try:
873        while 1:
874          arg = next(argIter)
875          self.logPrint( 'Checking arg '+arg, 4, 'compilers')
876          m = re.match(r'^-I.*$', arg)
877          if m:
878            self.logPrint('Found include option: '+arg, 4, 'compilers')
879            self.includepaths += arg + ' '
880            continue
881      except StopIteration:
882        pass
883      # find libraries
884      self.libpaths = ''
885      self.mpilibs = ''
886      argIter = iter(libsOutput.split())
887      try:
888        while 1:
889          arg = next(argIter)
890          self.logPrint( 'Checking arg '+arg, 4, 'compilers')
891          m = re.match(r'^-L.*$', arg)
892          if m:
893            self.logPrint('Found -L link option: '+arg, 4, 'compilers')
894            self.libpaths += arg + ' '
895          m = re.match(r'^-Wl.*$', arg)
896          if m:
897            self.logPrint('Found -Wl link option: '+arg, 4, 'compilers')
898            self.libpaths += arg + ' '
899          m = re.match(r'^-l.*$', arg)
900          if m:
901            self.logPrint('Found -l link option: '+arg, 4, 'compilers')
902            # TODO filter out system libraries
903            self.mpilibs += arg + ' '
904      except StopIteration:
905        pass
906    self.addMakeMacro('MPICXX_INCLUDES',self.includepaths)
907    self.addMakeMacro('MPICXX_LIBS',self.libpaths + ' ' + self.mpilibs)
908    return
909
910  def log_print_mpi_h_line(self,buf):
911    for line in buf.splitlines():
912      if 'mpi.h' in line:
913        self.log.write('mpi_h_line:\n'+line+'\n')
914        return
915    self.log.write('mpi.h not found in buf')
916    return
917
918  def PetscArchMPICheck(self):
919    '''Check that previously configured for MPI include files are not in the PETSC_ARCH directory'''
920    import os
921    '''Makes sure incompatible mpi.h is not in the PETSC_ARCH/include directory'''
922    build_mpi_h_dir = os.path.join(self.petscdir.dir,self.arch,'include')
923    build_mpi_h = os.path.join(build_mpi_h_dir,'mpi.h')
924    if os.path.isfile(build_mpi_h):
925      self.log.write('mpi.h found in build dir! Checking if its a bad copy.\n')
926      if self.usingMPIUni:
927        raise RuntimeError('There is a copy of mpi.h in '+build_mpi_h_dir+' that will conflict with --with-mpi=0 build. do:\nrm -rf '+self.arch+' and run ./configure again\n')
928      oldFlags = self.compilers.CPPFLAGS
929      mpi_h_test = '#include <mpi.h>'
930      # check self.include
931      self.compilers.CPPFLAGS = oldFlags+' '+self.headers.toString(self.include)
932      buf1 = self.outputPreprocess(mpi_h_test)
933      self.log_print_mpi_h_line(buf1)
934      # check build_mpi_h_dir and self.include
935      self.compilers.CPPFLAGS = oldFlags+' '+self.headers.getIncludeArgument(build_mpi_h_dir)+' '+self.headers.toString(self.include)
936      buf2 = self.outputPreprocess(mpi_h_test)
937      self.log_print_mpi_h_line(buf2)
938      if buf1 != buf2:
939        raise RuntimeError('There is a copy of mpi.h in '+build_mpi_h_dir+' that is not compatible with your MPI, do:\nrm -rf '+self.arch+' and run ./configure again\n')
940      self.compilers.CPPFLAGS = oldFlags
941    return
942
943  def configureLibrary(self):
944    '''Calls the regular package configureLibrary and then does an additional test needed by MPI'''
945    import platform
946    if 'with-'+self.package+'-shared' in self.argDB:
947      self.argDB['with-'+self.package] = 1
948    config.package.Package.configureLibrary(self)
949    if hasattr(self.compilers, 'FC'):
950      self.mpi_f08 = False
951      if self.argDB['with-mpi-ftn-module'] == 'mpi_f08':
952        self.addDefine('USE_MPI_F08',1)
953        self.mpi_f08 = True
954      elif not self.argDB['with-mpi-ftn-module'] == 'mpi':
955        raise RuntimeError('--with-mpi-ftn-module must be "mpi" or "mpi_f08", not "' + self.argDB['with-mpi-ftn-module'] +'"')
956      self.addDefine('MPI_FTN_MODULE',self.argDB['with-mpi-ftn-module'])
957
958      self.libraries.pushLanguage('FC')
959      oldFlags = self.compilers.FPPFLAGS
960      self.compilers.FPPFLAGS += ' '+self.headers.toString(self.include)
961      if self.mpi_f08:
962        self.log.write('Checking for mpi_f80.mod\n')
963        if self.libraries.check(self.lib,'', call = '       use mpi_f08\n       integer(kind=selected_int_kind(5)) ierr,rank\n       call mpi_init(ierr)\n       call mpi_comm_rank(MPI_COMM_WORLD,rank,ierr)\n'):
964          self.addDefine('HAVE_MPI_FTN_MODULE', 1)
965        else:
966          raise RuntimeError('You requested --with-mpi-ftn-module=mpi_f08 but that module does not exist')
967      else:
968        self.log.write('Checking for mpi.mod\n')
969        if self.libraries.check(self.lib,'', call = '       use mpi\n       integer(kind=selected_int_kind(5)) ierr,rank\n       call mpi_init(ierr)\n       call mpi_comm_rank(MPI_COMM_WORLD,rank,ierr)\n'):
970          self.addDefine('HAVE_MPI_FTN_MODULE', 1)
971        elif 'HAVE_MSMPI' not in self.defines:
972          self.logPrintWarning('Unable to find or use the MPI Fortran module file mpi.mod! PETSc will be configured to use "mpif.h" and not the MPI Fortran module')
973      self.compilers.FPPFLAGS = oldFlags
974      self.libraries.popLanguage()
975
976    if self.setCompilers.usedMPICompilers:
977      if 'with-mpi-include' in self.argDB: raise RuntimeError('Do not use --with-mpi-include when using MPI compiler wrappers')
978      if 'with-mpi-lib' in self.argDB: raise RuntimeError('Do not use --with-mpi-lib when using MPI compiler wrappers')
979    self.executeTest(self.checkMPIDistro)
980    if any(x in platform.processor() for x in ['i386','x86','i86pc']) and config.setCompilers.Configure.isSolaris(self.log) and hasattr(self, 'mpich_numversion') and int(self.mpich_numversion) >= 30301300:
981      # this is only needed if MPICH/HWLOC were compiled with optimization
982      self.logWrite('Setting environmental variable to work around buggy HWLOC\nhttps://github.com/open-mpi/hwloc/issues/290\n')
983      os.environ['HWLOC_COMPONENTS'] = '-x86'
984      self.addDefine('HAVE_HWLOC_SOLARIS_BUG',1)
985      self.logPrintWarning('This MPI implementation may have a bug in it that causes programs to hang. \
986You may need to set the environmental variable HWLOC_COMPONENTS to -x86 to prevent such hangs')
987    self.executeTest(self.configureMPI2) #depends on checkMPIDistro
988    self.executeTest(self.configureMPI3) #depends on checkMPIDistro
989    self.executeTest(self.configureMPI4)
990    self.executeTest(self.configureMPIX)
991    self.executeTest(self.configureMPIEXEC)
992    self.executeTest(self.configureMPIEXEC_TAIL)
993    self.executeTest(self.configureMPIGPUAware) # needs self.mpiexec
994    self.executeTest(self.configureMPITypes)
995    self.executeTest(self.SGIMPICheck)
996    self.executeTest(self.CxxMPICheck)
997    self.executeTest(self.FortranMPICheck) #depends on checkMPIDistro
998    self.executeTest(self.configureIO) #depends on checkMPIDistro
999    self.executeTest(self.findMPIIncludeAndLib)
1000    self.executeTest(self.PetscArchMPICheck)
1001
1002    oldFlags = self.compilers.CPPFLAGS # Disgusting save and restore
1003    self.compilers.CPPFLAGS += ' '+self.headers.toString(self.include)
1004    for combiner in ['MPI_COMBINER_DUP', 'MPI_COMBINER_CONTIGUOUS', 'MPI_COMBINER_NAMED']:
1005      if self.checkCompile('#include <mpi.h>', 'int combiner = %s;(void)combiner' % (combiner,)):
1006        self.addDefine('HAVE_' + combiner,1)
1007    self.compilers.CPPFLAGS = oldFlags
1008