! This introductory example illustrates running PETSc on a subset
! of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscsys
  implicit none

  PetscErrorCode ierr
  PetscMPIInt rank, size, zero, two

! We must call MPI_Init() first, making us, not PETSc, responsible
! for MPI

  PetscCallMPIA(MPI_Init(ierr))

! We can now change the communicator universe for PETSc: split
! MPI_COMM_WORLD into two sub-communicators (even-ranked and
! odd-ranked processes) and let PETSc run on this process's half.

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))

! Every PETSc routine should begin with the PetscInitialize()
! routine.

  PetscCallA(PetscInitialize(ierr))

! The following MPI calls return the number of processes being used
! and the rank of this process in the group -- note these are
! relative to the split communicator PETSc is running on, not to
! MPI_COMM_WORLD.

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

! Here we would like to print only one message that represents all
! the processes in the group.
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)

! Always call PetscFinalize() before exiting a program. This
! routine - finalizes the PETSc libraries as well as MPI - provides
! summary and diagnostic information if certain runtime options are
! chosen (e.g., -log_view). See PetscFinalize() manpage for more
! information.
!
! Because we created PETSC_COMM_WORLD ourselves, we free it only
! AFTER PetscFinalize(), while PETSc can no longer reference it.

  PetscCallA(PetscFinalize(ierr))
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))

! Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end

!
!/*TEST
!
!   test:
!
!TEST*/