/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>

/*
 * Increase the overlap for the sub-matrix across the sub-communicator;
 * the sub-matrix could be a graph or a numerical matrix
 * */
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt        i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt        *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt  *indices;
  PetscMPIInt     srank,ssize,issamecomm,k,grank;
  IS              is_sc,allis_sc,partitioning;
  MPI_Comm        gcomm,dcomm,scomm;
  PetscSF         sf;
  PetscSFNode     *remote;
  Mat             *smat;
  MatPartitioning part;

  PetscFunctionBegin;
  /* get the sub-communicator before calling the individual MatIncreaseOverlap,
   * since the sub-communicator may be changed
   * */
  PetscCall(PetscObjectGetComm((PetscObject)(*is),&dcomm));
  /* make a copy before the original one is deleted */
  PetscCall(PetscCommDuplicate(dcomm,&scomm,NULL));
  /* get the global communicator, where mat should be a global matrix */
  PetscCall(PetscObjectGetComm((PetscObject)mat,&gcomm));
  PetscCall((*mat->ops->increaseoverlap)(mat,1,is,ov));
  PetscCallMPI(MPI_Comm_compare(gcomm,scomm,&issamecomm));
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   * */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   * */
  PetscCallMPI(MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm));
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  PetscCallMPI(MPI_Comm_rank(scomm,&srank));
  PetscCallMPI(MPI_Comm_size(scomm,&ssize));
  PetscCallMPI(MPI_Comm_rank(gcomm,&grank));
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   * */
  PetscCall(ISGetLocalSize(*is,&nindx));
  PetscCall(PetscMalloc1(nindx,&indices_sc));
  PetscCall(ISGetIndices(*is,&indices));
  PetscCall(PetscArraycpy(indices_sc,indices,nindx));
  PetscCall(ISRestoreIndices(*is,&indices));
  /* the old IS is no longer needed */
  PetscCall(ISDestroy(is));
  /* create an index set based on the sub-communicator */
  PetscCall(ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc));
  /* gather all indices within the sub-communicator */
  PetscCall(ISAllGather(is_sc,&allis_sc));
  PetscCall(ISDestroy(&is_sc));
  /* gather local sizes */
  PetscCall(PetscMalloc1(ssize,&localsizes_sc));
  /* get the individual local sizes of all index sets */
  PetscCallMPI(MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm));
  /* only the root performs these computations */
  if (!srank) {
    /* get the local size of the big index set */
    PetscCall(ISGetLocalSize(allis_sc,&localsize));
    PetscCall(PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc));
    PetscCall(PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd));
    PetscCall(ISGetIndices(allis_sc,&indices));
    PetscCall(PetscArraycpy(indices_ov,indices,localsize));
    PetscCall(ISRestoreIndices(allis_sc,&indices));
    PetscCall(ISDestroy(&allis_sc));
    /* assign corresponding sources */
    localsize_tmp = 0;
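    /* sources_sc[j] records which sub-communicator rank contributed the j-th
     * gathered index; after the sort and de-duplication below, this bookkeeping
     * lets the root route each reduced index back to its contributor through
     * the PetscSF built further down */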
    for (k=0; k<ssize; k++) {
      for (i=0; i<localsizes_sc[k]; i++) {
        sources_sc[localsize_tmp++] = k;
      }
    }
    /* record where the indices come from */
    PetscCall(PetscSortIntWithArray(localsize,indices_ov,sources_sc));
    /* count local sizes for the reduced indices */
    PetscCall(PetscArrayzero(localsizes_sc,ssize));
    /* initialize the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers */
    for (i=1; i<localsize; i++) {
      if (indices_ov[i] != indices_ov[i-1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
    PetscCall(PetscFree2(indices_ov,sources_sc));
    PetscCall(PetscCalloc1(ssize+1,&localoffsets));
    for (k=0; k<ssize; k++) {
      localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
    }
    nleaves = localoffsets[ssize];
    PetscCall(PetscArrayzero(localoffsets,ssize+1));
    nroots  = localsizes_sc[srank];
    PetscCall(PetscMalloc1(nleaves,&remote));
    for (i=0; i<nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    PetscCall(PetscFree(localoffsets));
  } else {
    PetscCall(ISDestroy(&allis_sc));
    /* allocate a 'zero' pointer to avoid using an uninitialized variable */
    PetscCall(PetscCalloc1(0,&remote));
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the sizes to everybody */
  PetscCallMPI(MPI_Scatter(localsizes_sc,1,MPIU_INT,&nroots,1,MPIU_INT,0,scomm));
  PetscCall(PetscFree(localsizes_sc));
  PetscCall(PetscCalloc1(nroots,&indices_recv));
  /* send the data back to everybody */
  PetscCall(PetscSFCreate(scomm,&sf));
  PetscCall(PetscSFSetType(sf,PETSCSFBASIC));
  PetscCall(PetscSFSetFromOptions(sf));
  PetscCall(PetscSFSetGraph(sf,nroots,nleaves,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER));
  PetscCall(PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPI_REPLACE));
  PetscCall(PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPI_REPLACE));
  PetscCall(PetscSFDestroy(&sf));
  PetscCall(PetscFree2(indices_ov_rd,sources_sc_rd));
  PetscCall(ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc));
  PetscCall(MatCreateSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat));
  PetscCall(ISDestroy(&allis_sc));
  /* create a partitioner to repartition the sub-matrix */
  PetscCall(MatPartitioningCreate(scomm,&part));
  PetscCall(MatPartitioningSetAdjacency(part,smat[0]));
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, try to use ParMETIS,
   * because its repartitioning routine may work better
   * */
  PetscCall(MatPartitioningSetType(part,MATPARTITIONINGPARMETIS));
  /* try to use the repartitioning function instead of the partitioning function */
  PetscCall(MatPartitioningParmetisSetRepartition(part));
#else
  /* at least provide a default partitioner to rebalance the computation */
  PetscCall(MatPartitioningSetType(part,MATPARTITIONINGAVERAGE));
#endif
  /* the user can select any partitioner via the options database */
  PetscCall(MatPartitioningSetFromOptions(part));
  PetscCall(MatPartitioningApply(part,&partitioning));
  PetscCall(MatPartitioningDestroy(&part));
  PetscCall(MatDestroy(&(smat[0])));
  PetscCall(PetscFree(smat));
  /* get the local rows, including the overlap */
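  /* ISBuildTwoSided sends each global row index in is_sc to the rank that the
   * partitioning assigns it to, so every process ends up with the rows of the
   * repartitioned sub-matrix it now owns; this becomes the enlarged IS returned
   * to the caller */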
  PetscCall(ISBuildTwoSided(partitioning,is_sc,is));
  PetscCall(ISDestroy(&is_sc));
  PetscCall(ISDestroy(&partitioning));
  PetscCall(PetscCommDestroy(&scomm));
  PetscFunctionReturn(0);
}
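
/*
 * Usage sketch (illustrative only, not part of this file): the routine above is
 * the single-IS worker normally reached through the public entry point
 * MatIncreaseOverlapSplit(), which PCGASM uses for subdomains that span several
 * ranks. Assuming a parallel matrix A and an index set `sub` describing one
 * 'big' subdomain built on a sub-communicator (both names are hypothetical),
 * a call might look like:
 *
 *   IS sub;                                           // subdomain shared by several ranks
 *   // ... build `sub` on its sub-communicator ...
 *   PetscCall(MatIncreaseOverlapSplit(A,1,&sub,2));   // enlarge the subdomain by an overlap of 2
 *   // ... use the enlarged `sub`, then ISDestroy(&sub) ...
 */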