/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */
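
/*
 * Usage sketch: this routine is normally reached via MatIncreaseOverlapSplit(),
 * which applies it to each of n sub-domain index sets in turn, roughly:
 *
 *   for (i = 0; i < n; i++) PetscCall(MatIncreaseOverlapSplit_Single(mat, is + i, ov));
 *
 * Each *is may initially live on PETSC_COMM_SELF or on a sub-communicator of
 * the matrix communicator.
 */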

#include <petscsf.h>
#include <petsc/private/matimpl.h>

/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix could be a graph or a numerical matrix.
 */
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat, IS *is, PetscInt ov)
{
  PetscInt        i, nindx, *indices_sc, *indices_ov, localsize, *localsizes_sc, localsize_tmp;
  PetscInt       *indices_ov_rd, nroots, nleaves, *localoffsets, *indices_recv, *sources_sc, *sources_sc_rd;
  const PetscInt *indices;
  PetscMPIInt     srank, ssize, issamecomm, k, grank;
  IS              is_sc, allis_sc, partitioning;
  MPI_Comm        gcomm, dcomm, scomm;
  PetscSF         sf;
  PetscSFNode    *remote;
  Mat            *smat;
  MatPartitioning part;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the matrix type's MatIncreaseOverlap,
   * since the sub-communicator may be changed
   */
  PetscCall(PetscObjectGetComm((PetscObject)(*is), &dcomm));
  /* make a copy before the original one is deleted */
  PetscCall(PetscCommDuplicate(dcomm, &scomm, NULL));
  /* get the global communicator; mat is a matrix living on the global communicator */
  PetscCall(PetscObjectGetComm((PetscObject)mat, &gcomm));
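  /* increase the overlap of the index set on the global matrix first, using the matrix type's own implementation */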
  PetscUseTypeMethod(mat, increaseoverlap, 1, is, ov);
  PetscCallMPI(MPI_Comm_compare(gcomm, scomm, &issamecomm));
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user likewise does not care about the sub-communicator
   */
  PetscCallMPI(MPI_Comm_compare(scomm, PETSC_COMM_SELF, &issamecomm));
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCall(PetscCommDestroy(&scomm));
    PetscFunctionReturn(0);
  }
  PetscCallMPI(MPI_Comm_rank(scomm, &srank));
  PetscCallMPI(MPI_Comm_size(scomm, &ssize));
  PetscCallMPI(MPI_Comm_rank(gcomm, &grank));
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   */
  PetscCall(ISGetLocalSize(*is, &nindx));
  PetscCall(PetscMalloc1(nindx, &indices_sc));
  PetscCall(ISGetIndices(*is, &indices));
  PetscCall(PetscArraycpy(indices_sc, indices, nindx));
  PetscCall(ISRestoreIndices(*is, &indices));
  /* we do not need the original IS any more */
  PetscCall(ISDestroy(is));
  /* create an index set based on the sub-communicator */
  PetscCall(ISCreateGeneral(scomm, nindx, indices_sc, PETSC_OWN_POINTER, &is_sc));
  /* gather all indices within the sub-communicator; every rank receives the
   * full concatenation, but only the root will use it */
  PetscCall(ISAllGather(is_sc, &allis_sc));
  PetscCall(ISDestroy(&is_sc));
  /* gather local sizes */
  PetscCall(PetscMalloc1(ssize, &localsizes_sc));
  /* gather each rank's local size onto the root */
  PetscCallMPI(MPI_Gather(&nindx, 1, MPIU_INT, localsizes_sc, 1, MPIU_INT, 0, scomm));
  /* only the root does these computations */
  if (!srank) {
    /* get the local size of the big index set */
    PetscCall(ISGetLocalSize(allis_sc, &localsize));
    PetscCall(PetscCalloc2(localsize, &indices_ov, localsize, &sources_sc));
    PetscCall(PetscCalloc2(localsize, &indices_ov_rd, localsize, &sources_sc_rd));
    PetscCall(ISGetIndices(allis_sc, &indices));
    PetscCall(PetscArraycpy(indices_ov, indices, localsize));
    PetscCall(ISRestoreIndices(allis_sc, &indices));
    PetscCall(ISDestroy(&allis_sc));
    /* assign the source rank that contributed each gathered index */
    localsize_tmp = 0;
    for (k = 0; k < ssize; k++) {
      for (i = 0; i < localsizes_sc[k]; i++) { sources_sc[localsize_tmp++] = k; }
    }
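    /* e.g. with ssize = 2 and localsizes_sc = {3, 2}, sources_sc becomes {0, 0, 0, 1, 1} */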
    /* sort the indices, keeping the source rank of each index aligned with it */
    PetscCall(PetscSortIntWithArray(localsize, indices_ov, sources_sc));
    /* count the local sizes of the reduced (de-duplicated) indices */
    PetscCall(PetscArrayzero(localsizes_sc, ssize));
    /* initialize with the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers */
    for (i = 1; i < localsize; i++) {
      if (indices_ov[i] != indices_ov[i - 1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
    PetscCall(PetscFree2(indices_ov, sources_sc));
    PetscCall(PetscCalloc1(ssize + 1, &localoffsets));
    for (k = 0; k < ssize; k++) { localoffsets[k + 1] = localoffsets[k] + localsizes_sc[k]; }
    nleaves = localoffsets[ssize];
    PetscCall(PetscArrayzero(localoffsets, ssize + 1));
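    /* localoffsets is reused below as a running counter of the leaf slots handed to each rank */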
    nroots = localsizes_sc[srank];
    PetscCall(PetscMalloc1(nleaves, &remote));
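    /* build the star-forest graph: leaf i (the i-th de-duplicated index, held on this
     * root rank) points at root slot remote[i].index owned by rank remote[i].rank */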
    for (i = 0; i < nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    PetscCall(PetscFree(localoffsets));
  } else {
    PetscCall(ISDestroy(&allis_sc));
    /* allocate a 'zero' pointer to avoid using an uninitialized variable */
    PetscCall(PetscCalloc1(0, &remote));
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the sizes to everybody */
  PetscCallMPI(MPI_Scatter(localsizes_sc, 1, MPIU_INT, &nroots, 1, MPIU_INT, 0, scomm));
  PetscCall(PetscFree(localsizes_sc));
  PetscCall(PetscCalloc1(nroots, &indices_recv));
  /* send the data back to everybody via a star forest */
  PetscCall(PetscSFCreate(scomm, &sf));
  PetscCall(PetscSFSetType(sf, PETSCSFBASIC));
  PetscCall(PetscSFSetFromOptions(sf));
  PetscCall(PetscSFSetGraph(sf, nroots, nleaves, NULL, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER));
  PetscCall(PetscSFReduceBegin(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE));
  PetscCall(PetscSFReduceEnd(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE));
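  /* each rank now holds its share of the de-duplicated overlapping indices in indices_recv */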
  PetscCall(PetscSFDestroy(&sf));
  PetscCall(PetscFree2(indices_ov_rd, sources_sc_rd));
  PetscCall(ISCreateGeneral(scomm, nroots, indices_recv, PETSC_OWN_POINTER, &is_sc));
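  /* extract the overlapped sub-matrix as a parallel matrix on the sub-communicator */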
  PetscCall(MatCreateSubMatricesMPI(mat, 1, &is_sc, &is_sc, MAT_INITIAL_MATRIX, &smat));
  PetscCall(ISDestroy(&allis_sc));
  /* create a partitioner to repartition the sub-matrix */
  PetscCall(MatPartitioningCreate(scomm, &part));
  PetscCall(MatPartitioningSetAdjacency(part, smat[0]));
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS,
   * because its repartitioning routine may work better
   */
  PetscCall(MatPartitioningSetType(part, MATPARTITIONINGPARMETIS));
  /* try to use the repartition function instead of the partition function */
  PetscCall(MatPartitioningParmetisSetRepartition(part));
#else
  /* we at least provide a default partitioner to rebalance the computation */
  PetscCall(MatPartitioningSetType(part, MATPARTITIONINGAVERAGE));
#endif
  /* the user can pick any partitioner via the options database */
  PetscCall(MatPartitioningSetFromOptions(part));
  PetscCall(MatPartitioningApply(part, &partitioning));
  PetscCall(MatPartitioningDestroy(&part));
  PetscCall(MatDestroy(&(smat[0])));
  PetscCall(PetscFree(smat));
  /* deliver to each rank the global rows (including overlap) assigned to it by the partitioning */
  PetscCall(ISBuildTwoSided(partitioning, is_sc, is));
  PetscCall(ISDestroy(&is_sc));
  PetscCall(ISDestroy(&partitioning));
  PetscCall(PetscCommDestroy(&scomm));
  PetscFunctionReturn(0);
}