/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>

/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix can be a graph or a numerical matrix
 * */
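/*
 * Minimal usage sketch (illustrative only; 'A', 'subcomm', 'n', and 'idx' are
 * hypothetical names, not part of this file): the caller hands in an IS that
 * lives on a sub-communicator, and the routine replaces it with the
 * overlapped, repartitioned index set.
 *
 *   IS is;
 *   PetscCall(ISCreateGeneral(subcomm, n, idx, PETSC_COPY_VALUES, &is));
 *   PetscCall(MatIncreaseOverlapSplit_Single(A, &is, 1));
 *   // ... use the overlapped index set ...
 *   PetscCall(ISDestroy(&is));
 * */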
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat, IS *is, PetscInt ov)
{
  PetscInt        nindx, *indices_sc, *indices_ov, localsize, *localsizes_sc, localsize_tmp;
  PetscInt       *indices_ov_rd, nroots, nleaves, *localoffsets, *indices_recv;
  const PetscInt *indices;
  PetscMPIInt     srank, size, issamecomm, grank, *sources_sc, *sources_sc_rd;
  IS              is_sc, allis_sc, partitioning;
  MPI_Comm        gcomm, dcomm, scomm;
  PetscSF         sf;
  PetscSFNode    *remote;
  Mat            *smat;
  MatPartitioning part;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the type-specific MatIncreaseOverlap,
   * since the sub-communicator may be changed
   * */
  PetscCall(PetscObjectGetComm((PetscObject)*is, &dcomm));
  /* make a copy before the original one is deleted */
  PetscCall(PetscCommDuplicate(dcomm, &scomm, NULL));
  /* get the global communicator; mat is expected to be a global matrix */
  PetscCall(PetscObjectGetComm((PetscObject)mat, &gcomm));
  PetscUseTypeMethod(mat, increaseoverlap, 1, is, ov);
  PetscCallMPI(MPI_Comm_compare(gcomm, scomm, &issamecomm));
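  /* MPI_IDENT means the two communicators are the same object; MPI_CONGRUENT
   * means they share the same group and rank order but have different contexts */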
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   * */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   * */
  PetscCallMPI(MPI_Comm_compare(scomm, PETSC_COMM_SELF, &issamecomm));
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(PETSC_SUCCESS);
  }
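  /* query ranks and sizes; rank 0 of the sub-communicator acts as the root
   * for the gather/dedup/scatter steps below */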
  PetscCallMPI(MPI_Comm_rank(scomm, &srank));
  PetscCallMPI(MPI_Comm_size(scomm, &size));
  PetscCallMPI(MPI_Comm_rank(gcomm, &grank));
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   * */
  PetscCall(ISGetLocalSize(*is, &nindx));
  PetscCall(PetscMalloc1(nindx, &indices_sc));
  PetscCall(ISGetIndices(*is, &indices));
  PetscCall(PetscArraycpy(indices_sc, indices, nindx));
  PetscCall(ISRestoreIndices(*is, &indices));
  /* we do not need the original index set any more */
  PetscCall(ISDestroy(is));
  /* create an index set based on the sub-communicator */
  PetscCall(ISCreateGeneral(scomm, nindx, indices_sc, PETSC_OWN_POINTER, &is_sc));
  /* gather all indices within the sub-communicator */
  PetscCall(ISAllGather(is_sc, &allis_sc));
  PetscCall(ISDestroy(&is_sc));
  /* gather local sizes */
  PetscCall(PetscMalloc1(size, &localsizes_sc));
  /* get individual local sizes for all index sets */
  PetscCallMPI(MPI_Gather(&nindx, 1, MPIU_INT, localsizes_sc, 1, MPIU_INT, 0, scomm));
  /* only the root does these computations */
  if (!srank) {
    /* get the local size of the big index set */
    PetscCall(ISGetLocalSize(allis_sc, &localsize));
    PetscCall(PetscCalloc2(localsize, &indices_ov, localsize, &sources_sc));
    PetscCall(PetscCalloc2(localsize, &indices_ov_rd, localsize, &sources_sc_rd));
    PetscCall(ISGetIndices(allis_sc, &indices));
    PetscCall(PetscArraycpy(indices_ov, indices, localsize));
    PetscCall(ISRestoreIndices(allis_sc, &indices));
    PetscCall(ISDestroy(&allis_sc));
    /* assign the source rank of each gathered index */
    localsize_tmp = 0;
    for (PetscMPIInt k = 0; k < size; k++) {
      for (PetscInt i = 0; i < localsizes_sc[k]; i++) sources_sc[localsize_tmp++] = k;
    }
    /* sort the indices, carrying the source ranks along so we still know where each index came from */
    PetscCall(PetscSortIntWithMPIIntArray(localsize, indices_ov, sources_sc));
    /* count local sizes for the reduced indices */
    PetscCall(PetscArrayzero(localsizes_sc, size));
    /* initialize the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers */
    for (PetscInt i = 1; i < localsize; i++) {
      if (indices_ov[i] != indices_ov[i - 1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
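    /* indices_ov_rd now holds the unique sorted indices, and localsizes_sc[k]
     * counts how many of them originated on rank k */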
    PetscCall(PetscFree2(indices_ov, sources_sc));
    PetscCall(PetscCalloc1(size + 1, &localoffsets));
    for (PetscMPIInt k = 0; k < size; k++) localoffsets[k + 1] = localoffsets[k] + localsizes_sc[k];
    nleaves = localoffsets[size];
    PetscCall(PetscArrayzero(localoffsets, size + 1));
    nroots = localsizes_sc[srank];
    PetscCall(PetscMalloc1(nleaves, &remote));
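    /* each leaf lives on the root and targets the next free root slot on the
     * rank that contributed the index; localoffsets[] serves as a per-rank cursor */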
    for (PetscInt i = 0; i < nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    PetscCall(PetscFree(localoffsets));
  } else {
    PetscCall(ISDestroy(&allis_sc));
    /* allocate a zero-length array so the pointer is not left uninitialized */
    PetscCall(PetscCalloc1(0, &remote));
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the per-rank reduced sizes from the root to everybody */
  PetscCallMPI(MPI_Scatter(localsizes_sc, 1, MPIU_INT, &nroots, 1, MPIU_INT, 0, scomm));
  PetscCall(PetscFree(localsizes_sc));
  PetscCall(PetscCalloc1(nroots, &indices_recv));
  /* send the data back to everybody via a star forest */
  PetscCall(PetscSFCreate(scomm, &sf));
  PetscCall(PetscSFSetType(sf, PETSCSFBASIC));
  PetscCall(PetscSFSetFromOptions(sf));
  PetscCall(PetscSFSetGraph(sf, nroots, nleaves, NULL, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER));
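  /* all leaves live on the root rank, so the reduce pushes each deduplicated
   * index from the root's buffer into the nroots root slots of its source rank */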
  PetscCall(PetscSFReduceBegin(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE));
  PetscCall(PetscSFReduceEnd(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE));
  PetscCall(PetscSFDestroy(&sf));
  PetscCall(PetscFree2(indices_ov_rd, sources_sc_rd));
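  /* build the overlapped index set on the sub-communicator and extract the
   * corresponding parallel sub-matrix from the global matrix */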
  PetscCall(ISCreateGeneral(scomm, nroots, indices_recv, PETSC_OWN_POINTER, &is_sc));
  PetscCall(MatCreateSubMatricesMPI(mat, 1, &is_sc, &is_sc, MAT_INITIAL_MATRIX, &smat));
  PetscCall(ISDestroy(&allis_sc));
  /* create a partitioner to repartition the sub-matrix */
  PetscCall(MatPartitioningCreate(scomm, &part));
  PetscCall(MatPartitioningSetAdjacency(part, smat[0]));
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS
   * because its repartitioning routine likely works better
   * */
  PetscCall(MatPartitioningSetType(part, MATPARTITIONINGPARMETIS));
  /* try to use the repartition function instead of the partition function */
  PetscCall(MatPartitioningParmetisSetRepartition(part));
#else
  /* we at least provide a default partitioner to rebalance the computation */
  PetscCall(MatPartitioningSetType(part, MATPARTITIONINGAVERAGE));
#endif
  /* the user can pick any partitioner via an option */
  PetscCall(MatPartitioningSetFromOptions(part));
  PetscCall(MatPartitioningApply(part, &partitioning));
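  /* for each local row of smat[0], partitioning gives the rank that should
   * own that row after rebalancing */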
  PetscCall(MatPartitioningDestroy(&part));
  PetscCall(MatDestroy(&smat[0]));
  PetscCall(PetscFree(smat));
  /* get the local rows, including overlap */
  PetscCall(ISBuildTwoSided(partitioning, is_sc, is));
  PetscCall(ISDestroy(&is_sc));
  PetscCall(ISDestroy(&partitioning));
  PetscCall(PetscCommDestroy(&scomm));
  PetscFunctionReturn(PETSC_SUCCESS);
}