!
!   This introductory example illustrates running PETSc on a subset
!   of processes.
!
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscsys.h>
      use petscmpi  ! or mpi or mpi_f08
      use petscsys
      implicit none

      PetscErrorCode ierr
      PetscMPIInt    rank, size, zero, two

! We call MPI_Init() ourselves first, making us - not PETSc -
! responsible for starting and shutting down MPI.

      call MPI_Init(ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize MPI'
        stop
      endif

! Change the communicator universe for PETSc: split MPI_COMM_WORLD by
! rank parity into two subcommunicators, and make PETSC_COMM_WORLD the
! subcommunicator this process belongs to, so PETSc runs on a subset
! of the processes.

      zero = 0
      two = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),zero,PETSC_COMM_WORLD,ierr)

! Every PETSc routine should begin with the PetscInitialize() routine.

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
! MPI is already running at this point, so a bare stop would exit
! without finalizing MPI (erroneous, and may hang the other ranks);
! abort the whole MPI job instead.
        call MPI_Abort(MPI_COMM_WORLD,1,ierr)
        stop
      endif

! The following MPI calls return the number of processes being used
! and the rank of this process in the group - both relative to the
! subcommunicator created by the split above, not MPI_COMM_WORLD.

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr);CHKERRA(ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr);CHKERRA(ierr)

! Here we would like to print only one message that represents all
! the processes in the group (rank 0 of each subcommunicator).
      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)

! Always call PetscFinalize() before exiting a program. This routine
!   - finalizes the PETSc libraries (and MPI, when PETSc initialized
!     it; here we initialized MPI ourselves, so MPI stays up)
!   - provides summary and diagnostic information if certain runtime
!     options are chosen (e.g., -log_view).
! See the PetscFinalize() manpage for more information.

      call PetscFinalize(ierr)
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)

! Since we initialized MPI, we must call MPI_Finalize() ourselves.

      call MPI_Finalize(ierr)
      end

!
!/*TEST
!
!   test:
!
!TEST*/