xref: /petsc/src/sys/tutorials/ex4f90.F90 (revision c4762a1b19cd2af06abeed90e8f9d34fb975dd94)
!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------

12*c4762a1bSJed Brown      program main
13*c4762a1bSJed Brown#include <petsc/finclude/petscsys.h>
14*c4762a1bSJed Brown      use petscsys
15*c4762a1bSJed Brown      implicit none
16*c4762a1bSJed Brown
17*c4762a1bSJed Brown      PetscErrorCode ierr
18*c4762a1bSJed Brown      PetscMPIInt rank, size, two
19*c4762a1bSJed Brown
20*c4762a1bSJed Brown!     We must call MPI_Init() first, making us, not PETSc, responsible
21*c4762a1bSJed Brown!     for MPI
22*c4762a1bSJed Brown
23*c4762a1bSJed Brown      call MPI_Init(ierr)
24*c4762a1bSJed Brown      if (ierr .ne. 0) then
25*c4762a1bSJed Brown         print*,'Unable to initialize MPI'
26*c4762a1bSJed Brown         stop
27*c4762a1bSJed Brown      endif
28*c4762a1bSJed Brown
29*c4762a1bSJed Brown!     We can now change the communicator universe for PETSc
30*c4762a1bSJed Brown
31*c4762a1bSJed Brown      two = 2
32*c4762a1bSJed Brown      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
33*c4762a1bSJed Brown      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),0,PETSC_COMM_WORLD,ierr)
34*c4762a1bSJed Brown
35*c4762a1bSJed Brown!     Every PETSc routine should begin with the PetscInitialize()
36*c4762a1bSJed Brown!     routine.
37*c4762a1bSJed Brown
38*c4762a1bSJed Brown      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
39*c4762a1bSJed Brown      if (ierr .ne. 0) then
40*c4762a1bSJed Brown        print*,'Unable to initialize PETSc'
41*c4762a1bSJed Brown        stop
42*c4762a1bSJed Brown      endif
43*c4762a1bSJed Brown
44*c4762a1bSJed Brown!     The following MPI calls return the number of processes being used
45*c4762a1bSJed Brown!     and the rank of this process in the group.
46*c4762a1bSJed Brown
47*c4762a1bSJed Brown      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr);CHKERRA(ierr)
48*c4762a1bSJed Brown      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr);CHKERRA(ierr)
49*c4762a1bSJed Brown
50*c4762a1bSJed Brown
51*c4762a1bSJed Brown!     Here we would like to print only one message that represents all
52*c4762a1bSJed Brown!     the processes in the group.
53*c4762a1bSJed Brown      if (rank .eq. 0) write(6,100) size,rank
54*c4762a1bSJed Brown 100  format('No of Procs = ',i4,' rank = ',i4)
55*c4762a1bSJed Brown
56*c4762a1bSJed Brown!     Always call PetscFinalize() before exiting a program.  This
57*c4762a1bSJed Brown!     routine - finalizes the PETSc libraries as well as MPI - provides
58*c4762a1bSJed Brown!     summary and diagnostic information if certain runtime options are
59*c4762a1bSJed Brown!     chosen (e.g., -log_view).  See PetscFinalize() manpage for more
60*c4762a1bSJed Brown!     information.
61*c4762a1bSJed Brown
62*c4762a1bSJed Brown      call PetscFinalize(ierr)
63*c4762a1bSJed Brown      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)
64*c4762a1bSJed Brown
65*c4762a1bSJed Brown!     Since we initialized MPI, we must call MPI_Finalize()
66*c4762a1bSJed Brown
67*c4762a1bSJed Brown      call  MPI_Finalize(ierr)
68*c4762a1bSJed Brown      end

!
!/*TEST
!
!   test:
!
!TEST*/