#define PETSC_DLL
/*
     Provides utility routines for splitting an MPI communicator.
*/
#include "petscsys.h"    /*I   "petscsys.h"    I*/

#undef __FUNCT__
#define __FUNCT__ "PetscSubcommDestroy"
PetscErrorCode PETSCMAT_DLLEXPORT PetscSubcommDestroy(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* only the context itself is freed; the MPI communicators it holds are not freed here */
  ierr = PetscFree(psubcomm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscSubcommCreate"
/*@C
  PetscSubcommCreate - Create a PetscSubcomm context.

   Collective on MPI_Comm

   Input Parameters:
+  comm     - MPI communicator
-  nsubcomm - the number of subcommunicators to be created

   Output Parameter:
.  psubcomm - location to store the PetscSubcomm context

   Notes:
   To avoid data scattering from the subcommunicators back to the original comm, the
   subcommunicators are created by distributing the processes among them cyclically
   (process rank modulo nsubcomm gives the color), as in the example below.
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2 and owns processes [0] and [3],
           subcomm[color = 1] has subsize=1 and owns process [1],
           subcomm[color = 2] has subsize=1 and owns process [2].
           dupcomm has the same number of processes as comm, and its rank, duprank,
           maps the processes of each subcomm contiguously into a 1d array:
            duprank:  [0]  [1]        [2]         [3]
            rank:     [0]  [3]        [1]         [2]
                      subcomm[0]   subcomm[1]  subcomm[2]

   Level: advanced

.keywords: communicator, create

.seealso: PetscSubcommDestroy()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT PetscSubcommCreate(MPI_Comm comm,PetscInt nsubcomm,PetscSubcomm *psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscInt       np_subcomm,nleftover,i,j,color;
  MPI_Comm       subcomm=0,dupcomm=0;
  PetscSubcomm   psubcomm_tmp;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

  /* get the size of each subcommunicator */
  ierr = PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);CHKERRQ(ierr);
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++){
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find the color and subrank of this process */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* duprank = (sum of the sizes of the subcommunicators with a smaller color) + subrank */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++){
    if (j == color){
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }
  ierr = PetscFree(subsize);CHKERRQ(ierr);

  /* create dupcomm with the same size as comm, but whose rank, duprank, maps the subcomms contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);

  ierr = PetscNew(struct _n_PetscSubcomm,&psubcomm_tmp);CHKERRQ(ierr);
  psubcomm_tmp->parent    = comm;
  psubcomm_tmp->dupparent = dupcomm;
  psubcomm_tmp->comm      = subcomm;
  psubcomm_tmp->n         = nsubcomm;
  psubcomm_tmp->color     = color;
  *psubcomm = psubcomm_tmp;
  PetscFunctionReturn(0);
}
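
/* ------------------------------------------------------------------------
   Example usage: a minimal sketch, not part of this file or of the PETSc
   library itself.  It assumes that PetscSubcomm, PetscSubcommCreate() and
   PetscSubcommDestroy() are declared in petscsys.h (as the include above
   suggests), that the struct _n_PetscSubcomm fields set in this file
   (comm, color, n, ...) are visible to user code, and that the older PETSc
   API used in this file is in effect (e.g. one-argument
   PetscSynchronizedFlush()).  The program splits PETSC_COMM_WORLD into
   three subcommunicators and reports which one each process landed in.
   It is guarded by #if 0 so it does not interfere with compiling this file. */
#if 0
#include "petscsys.h"

int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscSubcomm   psubcomm;
  PetscMPIInt    rank,subrank;

  ierr = PetscInitialize(&argc,&argv,(char*)0,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  /* split PETSC_COMM_WORLD into 3 subcommunicators */
  ierr = PetscSubcommCreate(PETSC_COMM_WORLD,3,&psubcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(psubcomm->comm,&subrank);CHKERRQ(ierr);
  ierr = PetscSynchronizedPrintf(PETSC_COMM_WORLD,"rank %d: color %d, subrank %d\n",rank,(int)psubcomm->color,subrank);CHKERRQ(ierr);
  ierr = PetscSynchronizedFlush(PETSC_COMM_WORLD);CHKERRQ(ierr);

  /* PetscSubcommDestroy() frees only the context; the communicators created by
     PetscSubcommCreate() are not freed by it in this version */
  ierr = PetscSubcommDestroy(psubcomm);CHKERRQ(ierr);
  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}
#endif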