!
!     This introductory example illustrates running PETSc on a subset
!     of the processes in MPI_COMM_WORLD.
!
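!     For example (assuming the compiled executable is named ex4f90):
!        mpiexec -n 2 ./ex4f90
!     With 2 processes, the split below produces two subcommunicators
!     of size 1, so each prints its own message.
!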
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscsys.h>
      use petscsys
      implicit none

      PetscErrorCode ierr
      PetscMPIInt rank, size, zero, two

!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for initializing and finalizing MPI.
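!     (Had we not called MPI_Init() here, PetscInitialize() would call
!     it for us, and PetscFinalize() would then also call
!     MPI_Finalize().)
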
      call MPI_Init(ierr)
      if (ierr .ne. 0) then
         print*,'Unable to initialize MPI'
         stop
      endif

!     Before PetscInitialize() is called we can set PETSC_COMM_WORLD to
!     a subcommunicator of MPI_COMM_WORLD, so that PETSc runs on only a
!     subset of the processes; here we split the processes into two
!     groups by even/odd rank.

      zero = 0
      two = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),zero,PETSC_COMM_WORLD,ierr)
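!     (In MPI_Comm_split() the second argument, the "color"
!     mod(rank,2), selects which new communicator each process joins,
!     so even- and odd-ranked processes form two separate groups; the
!     third argument, the "key" 0, keeps the original relative rank
!     ordering within each group.)
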
!     Every PETSc program should begin with a call to
!     PetscInitialize().

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif

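!     PetscInitialize() uses whatever communicator is currently stored
!     in PETSC_COMM_WORLD, so each of the two subcommunicators now runs
!     its own independent instance of PETSc.
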
!     The following MPI calls return the number of processes in
!     PETSC_COMM_WORLD and the rank of this process within it; both
!     values refer to the subcommunicator created above, not to
!     MPI_COMM_WORLD.

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr);CHKERRA(ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr);CHKERRA(ierr)

!     Here we would like to print only one message that represents all
!     the processes in the group.  Because MPI_COMM_WORLD was split in
!     two, each subcommunicator has its own rank 0, so one message is
!     printed per subcommunicator.
      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)

!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  Because we initialized MPI ourselves,
!     PetscFinalize() does not call MPI_Finalize(); we do that below.
!     See the PetscFinalize() manpage for more information.

      call PetscFinalize(ierr)
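!     Free the subcommunicator created by MPI_Comm_split() above; this
!     is done after PetscFinalize() but before MPI_Finalize(), while
!     MPI is still active.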
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)

!     Since we initialized MPI, we must call MPI_Finalize()

      call MPI_Finalize(ierr)
      end

!
!/*TEST
!
!   test:
!
!TEST*/