/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>

/*
 * Increase overlap for the sub-matrix across a sub-communicator.
 * The sub-matrix could be a graph or a numerical matrix.
 *
 * mat - the global matrix (lives on the global communicator)
 * is  - on input, the index set describing the subdomain (often built on
 *       PETSC_COMM_SELF); on output, replaced by a new IS on the
 *       sub-communicator containing the overlapped, repartitioned rows
 * ov  - number of overlap levels to add
 *
 * Collective on the communicator of *is; must be called by every rank of
 * that communicator (it performs MPI_Gather/MPI_Scatter and PetscSF
 * collectives internally).
 * */
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt        i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt        *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt  *indices;
  PetscMPIInt     srank,ssize,issamecomm,k,grank;
  IS              is_sc,allis_sc,partitioning;
  MPI_Comm        gcomm,dcomm,scomm;
  PetscSF         sf;
  PetscSFNode     *remote;
  Mat             *smat;
  MatPartitioning part;

  PetscFunctionBegin;
  /* get a sub communicator before calling individual MatIncreaseOverlap
   * since the sub communicator may be changed.
   * */
  CHKERRQ(PetscObjectGetComm((PetscObject)(*is),&dcomm));
  /* make a copy before the original one is deleted */
  CHKERRQ(PetscCommDuplicate(dcomm,&scomm,NULL));
  /* get a global communicator, where mat should be a global matrix */
  CHKERRQ(PetscObjectGetComm((PetscObject)mat,&gcomm));
  /* first grow the overlap using the matrix's own implementation */
  CHKERRQ((*mat->ops->increaseoverlap)(mat,1,is,ov));
  CHKERRMPI(MPI_Comm_compare(gcomm,scomm,&issamecomm));
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator: nothing more to do
   * */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    CHKERRQ(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  /* if the sub-communicator is petsc_comm_self,
   * the user also does not care about the sub-communicator
   * */
  CHKERRMPI(MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm));
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    CHKERRQ(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  CHKERRMPI(MPI_Comm_rank(scomm,&srank));
  CHKERRMPI(MPI_Comm_size(scomm,&ssize));
  /* NOTE(review): grank is obtained here but not used anywhere below in
   * this function — possibly left over from debugging; confirm before removing */
  CHKERRMPI(MPI_Comm_rank(gcomm,&grank));
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on petsc_comm_self
   * */
  CHKERRQ(ISGetLocalSize(*is,&nindx));
  CHKERRQ(PetscMalloc1(nindx,&indices_sc));
  CHKERRQ(ISGetIndices(*is,&indices));
  CHKERRQ(PetscArraycpy(indices_sc,indices,nindx));
  CHKERRQ(ISRestoreIndices(*is,&indices));
  /* the old (typically self-based) IS is no longer needed */
  CHKERRQ(ISDestroy(is));
  /* create an index set based on the sub communicator; ownership of
   * indices_sc transfers to is_sc (PETSC_OWN_POINTER) */
  CHKERRQ(ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc));
  /* gather all indices within the sub communicator */
  CHKERRQ(ISAllGather(is_sc,&allis_sc));
  CHKERRQ(ISDestroy(&is_sc));
  /* gather local sizes */
  CHKERRQ(PetscMalloc1(ssize,&localsizes_sc));
  /* get individual local sizes for all index sets; only rank 0 receives them */
  CHKERRMPI(MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm));
  /* only root does these computations */
  if (!srank) {
    /* get local size for the big (gathered) index set */
    CHKERRQ(ISGetLocalSize(allis_sc,&localsize));
    CHKERRQ(PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc));
    CHKERRQ(PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd));
    CHKERRQ(ISGetIndices(allis_sc,&indices));
    CHKERRQ(PetscArraycpy(indices_ov,indices,localsize));
    CHKERRQ(ISRestoreIndices(allis_sc,&indices));
    CHKERRQ(ISDestroy(&allis_sc));
    /* assign corresponding sources: sources_sc[j] = the rank whose IS
     * contributed the j-th gathered index (ISAllGather concatenates
     * contributions in rank order) */
    localsize_tmp = 0;
    for (k=0; k<ssize; k++) {
      for (i=0; i<localsizes_sc[k]; i++) {
        sources_sc[localsize_tmp++] = k;
      }
    }
    /* sort the indices, carrying the source ranks along so we still know
     * where each index came from */
    CHKERRQ(PetscSortIntWithArray(localsize,indices_ov,sources_sc));
    /* recount local sizes for the reduced (deduplicated) indices */
    CHKERRQ(PetscArrayzero(localsizes_sc,ssize));
    /* initialize with the first entry (if any) */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers: since indices_ov is sorted, duplicates are
     * adjacent; each unique index is attributed to the first rank that
     * contributed it */
    for (i=1; i<localsize; i++) {
      if (indices_ov[i] != indices_ov[i-1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
    CHKERRQ(PetscFree2(indices_ov,sources_sc));
    /* prefix-sum the per-rank deduplicated counts into offsets */
    CHKERRQ(PetscCalloc1(ssize+1,&localoffsets));
    for (k=0; k<ssize; k++) {
      localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
    }
    nleaves = localoffsets[ssize];
    /* reuse localoffsets as running write cursors for building the SF graph */
    CHKERRQ(PetscArrayzero(localoffsets,ssize+1));
    nroots  = localsizes_sc[srank];
    CHKERRQ(PetscMalloc1(nleaves,&remote));
    /* leaf i (on root) maps to slot localoffsets[rank]++ on its source rank */
    for (i=0; i<nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    CHKERRQ(PetscFree(localoffsets));
  } else {
    CHKERRQ(ISDestroy(&allis_sc));
    /* Allocate a 'zero' pointer to avoid using an uninitialized variable;
     * PetscSFSetGraph below takes ownership of it */
    CHKERRQ(PetscCalloc1(0,&remote));
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the deduplicated per-rank sizes to everybody
   * (overwrites nroots on root with its own count, consistent with above) */
  CHKERRMPI(MPI_Scatter(localsizes_sc,1,MPIU_INT,&nroots,1,MPIU_INT,0,scomm));
  CHKERRQ(PetscFree(localsizes_sc));
  CHKERRQ(PetscCalloc1(nroots,&indices_recv));
  /* send the data back to everybody: root's leaves reduce into each rank's
   * roots; MPI_REPLACE because each root slot is written exactly once */
  CHKERRQ(PetscSFCreate(scomm,&sf));
  CHKERRQ(PetscSFSetType(sf,PETSCSFBASIC));
  CHKERRQ(PetscSFSetFromOptions(sf));
  /* SF takes ownership of remote (PETSC_OWN_POINTER) */
  CHKERRQ(PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER));
  CHKERRQ(PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPI_REPLACE));
  CHKERRQ(PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPI_REPLACE));
  CHKERRQ(PetscSFDestroy(&sf));
  CHKERRQ(PetscFree2(indices_ov_rd,sources_sc_rd));
  /* ownership of indices_recv transfers to is_sc */
  CHKERRQ(ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc));
  CHKERRQ(MatCreateSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat));
  /* NOTE(review): allis_sc was already destroyed on every rank above, so this
   * is a no-op (ISDestroy NULLs the pointer); kept for safety */
  CHKERRQ(ISDestroy(&allis_sc));
  /* create a partitioner to repartition the sub-matrix */
  CHKERRQ(MatPartitioningCreate(scomm,&part));
  CHKERRQ(MatPartitioningSetAdjacency(part,smat[0]));
#if defined(PETSC_HAVE_PARMETIS)
  /* if there exists a ParMETIS installation, we try to use ParMETIS
   * because a repartition routine possibly works better
   * */
  CHKERRQ(MatPartitioningSetType(part,MATPARTITIONINGPARMETIS));
  /* try to use the repartition function instead of the partition function */
  CHKERRQ(MatPartitioningParmetisSetRepartition(part));
#else
  /* we at least provide a default partitioner to rebalance the computation */
  CHKERRQ(MatPartitioningSetType(part,MATPARTITIONINGAVERAGE));
#endif
  /* the user can pick any partitioner via an option */
  CHKERRQ(MatPartitioningSetFromOptions(part));
  CHKERRQ(MatPartitioningApply(part,&partitioning));
  CHKERRQ(MatPartitioningDestroy(&part));
  CHKERRQ(MatDestroy(&(smat[0])));
  CHKERRQ(PetscFree(smat));
  /* get local rows including overlap; result is returned to the caller in *is */
  CHKERRQ(ISBuildTwoSided(partitioning,is_sc,is));
  CHKERRQ(ISDestroy(&is_sc));
  CHKERRQ(ISDestroy(&partitioning));
  CHKERRQ(PetscCommDestroy(&scomm));
  PetscFunctionReturn(0);
}