xref: /petsc/src/dm/impls/plex/plexdistribute.c (revision 6ffe77eaecce1557e50d00ca5347a7f48e598865)
1 #include <petsc/private/dmpleximpl.h>    /*I      "petscdmplex.h"   I*/
2 #include <petsc/private/dmlabelimpl.h>   /*I      "petscdmlabel.h"  I*/
3 
4 /*@C
5   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
6 
7   Input Parameters:
8 + dm      - The DM object
9 . user    - The user callback, may be NULL (to clear the callback)
10 - ctx     - context for callback evaluation, may be NULL
11 
12   Level: advanced
13 
14   Notes:
15      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
16 
17      Any setting here overrides other configuration of DMPlex adjacency determination.
18 
19 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
20 @*/
21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22 {
23   DM_Plex *mesh = (DM_Plex *)dm->data;
24 
25   PetscFunctionBegin;
26   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
27   mesh->useradjacency = user;
28   mesh->useradjacencyctx = ctx;
29   PetscFunctionReturn(0);
30 }
31 
32 /*@C
33   DMPlexGetAdjacencyUser - get the user-defined adjacency callback
34 
35   Input Parameter:
36 . dm      - The DM object
37 
38   Output Parameters:
39 + user    - The user callback
40 - ctx     - context for callback evaluation
41 
42   Level: advanced
43 
44 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
45 @*/
46 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47 {
48   DM_Plex *mesh = (DM_Plex *)dm->data;
49 
50   PetscFunctionBegin;
51   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
52   if (user) *user = mesh->useradjacency;
53   if (ctx) *ctx = mesh->useradjacencyctx;
54   PetscFunctionReturn(0);
55 }
56 
57 /*@
58   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
59 
60   Input Parameters:
61 + dm      - The DM object
62 - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
63 
64   Level: intermediate
65 
66 .seealso: `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
67 @*/
68 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69 {
70   DM_Plex *mesh = (DM_Plex *) dm->data;
71 
72   PetscFunctionBegin;
73   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
74   mesh->useAnchors = useAnchors;
75   PetscFunctionReturn(0);
76 }
77 
78 /*@
79   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
80 
81   Input Parameter:
82 . dm      - The DM object
83 
84   Output Parameter:
85 . useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
86 
87   Level: intermediate
88 
89 .seealso: `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
90 @*/
91 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92 {
93   DM_Plex *mesh = (DM_Plex *) dm->data;
94 
95   PetscFunctionBegin;
96   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
97   PetscValidBoolPointer(useAnchors, 2);
98   *useAnchors = mesh->useAnchors;
99   PetscFunctionReturn(0);
100 }
101 
102 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103 {
104   const PetscInt *cone = NULL;
105   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106 
107   PetscFunctionBeginHot;
108   PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109   PetscCall(DMPlexGetCone(dm, p, &cone));
110   for (c = 0; c <= coneSize; ++c) {
111     const PetscInt  point   = !c ? p : cone[c-1];
112     const PetscInt *support = NULL;
113     PetscInt        supportSize, s, q;
114 
115     PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116     PetscCall(DMPlexGetSupport(dm, point, &support));
117     for (s = 0; s < supportSize; ++s) {
118       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
119         if (support[s] == adj[q]) break;
120       }
121       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
122     }
123   }
124   *adjSize = numAdj;
125   PetscFunctionReturn(0);
126 }
127 
128 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129 {
130   const PetscInt *support = NULL;
131   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
132 
133   PetscFunctionBeginHot;
134   PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135   PetscCall(DMPlexGetSupport(dm, p, &support));
136   for (s = 0; s <= supportSize; ++s) {
137     const PetscInt  point = !s ? p : support[s-1];
138     const PetscInt *cone  = NULL;
139     PetscInt        coneSize, c, q;
140 
141     PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142     PetscCall(DMPlexGetCone(dm, point, &cone));
143     for (c = 0; c < coneSize; ++c) {
144       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
145         if (cone[c] == adj[q]) break;
146       }
147       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
148     }
149   }
150   *adjSize = numAdj;
151   PetscFunctionReturn(0);
152 }
153 
154 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155 {
156   PetscInt      *star = NULL;
157   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
158 
159   PetscFunctionBeginHot;
160   PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161   for (s = 0; s < starSize*2; s += 2) {
162     const PetscInt *closure = NULL;
163     PetscInt        closureSize, c, q;
164 
165     PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
166     for (c = 0; c < closureSize*2; c += 2) {
167       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
168         if (closure[c] == adj[q]) break;
169       }
170       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
171     }
172     PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
173   }
174   PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175   *adjSize = numAdj;
176   PetscFunctionReturn(0);
177 }
178 
/* Core adjacency driver: dispatches to the user callback or one of the cone/support/
   transitive strategies, optionally allocating the adjacency array and optionally
   replacing constrained (anchored) points by their anchor points.
   On entry *adj may be NULL (array is allocated here, caller must PetscFree it) and
   *adjSize may be negative (meaning "use the allocated capacity"). */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): static cache of the most recent allocation size; not thread-safe, and
     stale if the caller passes in its own *adj while *adjSize < 0 — verify callers */
  static PetscInt asiz = 0;
  PetscInt maxAnchors = 1;
  PetscInt aStart = -1, aEnd = -1;
  PetscInt maxAdjSize;
  PetscSection aSec = NULL;
  IS aIS = NULL;
  const PetscInt *anchors;   /* only read when useAnchors && aSec, set by ISGetIndices below */
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm,&aSec,&aIS));
    if (aSec) {
      /* maxAnchors inflates the allocation estimate, since each constrained point may
         be replaced by up to this many anchor points */
      PetscCall(PetscSectionGetMaxDof(aSec,&maxAnchors));
      maxAnchors = PetscMax(1,maxAnchors);
      PetscCall(PetscSectionGetChart(aSec,&aStart,&aEnd));
      PetscCall(ISGetIndices(aIS,&anchors));
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    PetscCall(DMPlexGetChart(dm, &pStart,&pEnd));
    PetscCall(DMPlexGetDepth(dm, &depth));
    depth = PetscMax(depth, -depth);
    PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
    /* Geometric-series bound on the number of points reachable through depth levels of
       cones (resp. supports); the max of the two combinations bounds any strategy above */
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz  = PetscMin(asiz,pEnd-pStart);  /* never need more than every point in the chart */
    PetscCall(PetscMalloc1(asiz,adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* A user-registered callback overrides all built-in strategies */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    /* Replace each constrained point in the adjacency by its anchor points, in place:
       the constrained entry is removed (entries after it shift left) and any anchors
       not already present are appended at the end. i is only advanced when orig[i] is
       unconstrained, so newly shifted-in entries are examined too. */
    PetscInt origSize = *adjSize;
    PetscInt numAdj = origSize;
    PetscInt i = 0, j;
    PetscInt *orig = *adj;

    while (i < origSize) {
      PetscInt p = orig[i];   /* shadows the function argument p intentionally? TODO confirm */
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        PetscCall(PetscSectionGetDof(aSec,p,&aDof));
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Remove the constrained point by shifting the tail left by one */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec,p,&aOff));
        for (s = 0; s < aDof; ++s) {
          /* Append each anchor not already present (comma-expression insert idiom).
             NOTE(review): as in the strategy helpers, the insert happens before the
             capacity check below, so an overflow writes one slot out of bounds first */
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
        }
      }
      else {
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS,&anchors));
  }
  PetscFunctionReturn(0);
}
264 
265 /*@
266   DMPlexGetAdjacency - Return all points adjacent to the given point
267 
268   Input Parameters:
269 + dm - The DM object
270 - p  - The point
271 
272   Input/Output Parameters:
273 + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
274             on output the number of adjacent points
275 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
276         on output contains the adjacent points
277 
278   Level: advanced
279 
280   Notes:
281     The user must PetscFree the adj array if it was not passed in.
282 
283 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
284 @*/
285 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
286 {
287   PetscBool      useCone, useClosure, useAnchors;
288 
289   PetscFunctionBeginHot;
290   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
291   PetscValidIntPointer(adjSize,3);
292   PetscValidPointer(adj,4);
293   PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
294   PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
295   PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
296   PetscFunctionReturn(0);
297 }
298 
/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on dm

  Input Parameters:
+ dm      - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - Section giving, per root point, the number of ranks sharing it
. rootRanks - IS of the ranks sharing each root point (indexed by rootRankSection)
. leafRankSection - Section giving, per leaf point, the number of ranks sharing it
- leafRanks - IS of the ranks sharing each leaf point (indexed by leafRankSection)

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess    - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: `PetscSFCreate()`, `DMPlexCreateProcessSF()`
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors;      /* bitmap over all ranks: neighbor or not */
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) {PetscValidPointer(processRanks, 7);}
  if (sfProcess)    {PetscValidPointer(sfProcess, 8);}
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
  PetscCall(PetscBTCreate(size, &neighbors));
  PetscCall(PetscBTMemzero(size, neighbors));
  /* Compute root-to-leaf process connectivity */
  PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(rootRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(rootRanks, &nranks));
  /* Compute leaf-to-neighbor process connectivity */
  PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(leafRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(leafRanks, &nranks));
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges: a rank is never its own neighbor */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
  PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
  PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
  /* One SF leaf per neighbor: local index is the neighbor rank, remote root is (rank, proc) */
  for (proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  PetscCall(PetscBTDestroy(&neighbors));
  /* Ownership of ranksNew passes to the IS if requested, otherwise free it here */
  if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
  else              PetscCall(PetscFree(ranksNew));
  if (sfProcess) {
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
    PetscCall(PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF"));
    PetscCall(PetscSFSetFromOptions(*sfProcess));
    PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
  }
  PetscFunctionReturn(0);
}
391 
/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on dm

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank    - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank    - The rank of each process sharing a leaf point

  Level: developer

.seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMGetPointSF(dm, &sfPoint));
  /* Compute number of leaves for each root */
  PetscCall(PetscObjectSetName((PetscObject) rootSection, "Root Section"));
  PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
  PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
  PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
  for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]));
  PetscCall(PetscSectionSetUp(rootSection));
  /* Gather rank of each leaf to root: every leaf contributes its own rank, the gather
     packs one entry per incoming SF edge at each root (nedges total) */
  PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
  PetscCall(PetscMalloc1(pEnd-pStart, &myrank));
  PetscCall(PetscMalloc1(nedges,  &remoterank));
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscFree(myrank));
  /* remoterank ownership passes to the IS */
  PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
  /* Distribute remote ranks to leaves: each leaf learns which other ranks share its root */
  PetscCall(PetscObjectSetName((PetscObject) leafSection, "Leaf Section"));
  PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
  PetscFunctionReturn(0);
}
445 
446 /*@C
447   DMPlexCreateOverlapLabel - Compute owner information for shared points. This basically gets two-sided for an SF.
448 
449   Collective on dm
450 
451   Input Parameters:
452 + dm          - The DM
453 . levels      - Number of overlap levels
454 . rootSection - The number of leaves for a given root point
455 . rootrank    - The rank of each edge into the root point
456 . leafSection - The number of processes sharing a given leaf point
457 - leafrank    - The rank of each process sharing a leaf point
458 
459   Output Parameter:
460 . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
461 
462   Level: developer
463 
464 .seealso: `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
465 @*/
466 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
467 {
468   MPI_Comm           comm;
469   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
470   PetscSF            sfPoint;
471   const PetscSFNode *remote;
472   const PetscInt    *local;
473   const PetscInt    *nrank, *rrank;
474   PetscInt          *adj = NULL;
475   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
476   PetscMPIInt        rank, size;
477   PetscBool          flg;
478 
479   PetscFunctionBegin;
480   *ovLabel = NULL;
481   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
482   PetscCallMPI(MPI_Comm_size(comm, &size));
483   PetscCallMPI(MPI_Comm_rank(comm, &rank));
484   if (size == 1) PetscFunctionReturn(0);
485   PetscCall(DMGetPointSF(dm, &sfPoint));
486   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
487   if (!levels) {
488     /* Add owned points */
489     PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
490     for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
491     PetscFunctionReturn(0);
492   }
493   PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
494   PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
495   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
496   /* Handle leaves: shared with the root point */
497   for (l = 0; l < nleaves; ++l) {
498     PetscInt adjSize = PETSC_DETERMINE, a;
499 
500     PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
501     for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
502   }
503   PetscCall(ISGetIndices(rootrank, &rrank));
504   PetscCall(ISGetIndices(leafrank, &nrank));
505   /* Handle roots */
506   for (p = pStart; p < pEnd; ++p) {
507     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
508 
509     if ((p >= sStart) && (p < sEnd)) {
510       /* Some leaves share a root with other leaves on different processes */
511       PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
512       if (neighbors) {
513         PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
514         PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
515         for (n = 0; n < neighbors; ++n) {
516           const PetscInt remoteRank = nrank[noff+n];
517 
518           if (remoteRank == rank) continue;
519           for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
520         }
521       }
522     }
523     /* Roots are shared with leaves */
524     PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
525     if (!neighbors) continue;
526     PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
527     PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
528     for (n = 0; n < neighbors; ++n) {
529       const PetscInt remoteRank = rrank[noff+n];
530 
531       if (remoteRank == rank) continue;
532       for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
533     }
534   }
535   PetscCall(PetscFree(adj));
536   PetscCall(ISRestoreIndices(rootrank, &rrank));
537   PetscCall(ISRestoreIndices(leafrank, &nrank));
538   /* Add additional overlap levels */
539   for (l = 1; l < levels; l++) {
540     /* Propagate point donations over SF to capture remote connections */
541     PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
542     /* Add next level of point donations to the label */
543     PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
544   }
545   /* We require the closure in the overlap */
546   PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
547   PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg));
548   if (flg) {
549     PetscViewer viewer;
550     PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
551     PetscCall(DMLabelView(ovAdjByRank, viewer));
552   }
553   /* Invert sender to receiver label */
554   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
555   PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
556   /* Add owned points, except for shared local points */
557   for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
558   for (l = 0; l < nleaves; ++l) {
559     PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
560     PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
561   }
562   /* Clean up */
563   PetscCall(DMLabelDestroy(&ovAdjByRank));
564   PetscFunctionReturn(0);
565 }
566 
/*@C
  DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

  Collective on dm

  Input Parameters:
+ dm          - The DM
- overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - An SF that maps original points in old locations to points in new locations

  Level: developer

.seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets:
     tag every local point with its depth and pull the depth of each incoming
     overlap point across the overlap SF */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift.
     NOTE(review): the shift accumulation below appears to interleave received points
     after each existing stratum in the order cells, vertices, faces, edges — confirm
     against DMPlexStratifyMigrationSF before relying on the exact ordering */
  PetscCall(PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx));
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    /* depthIdx[d] is the next free new-numbering slot for received points of depth d */
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves;
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves (shifted to their new stratum positions) */
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points, so already-shared points keep
     pointing at their true owners rather than at this rank */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points, appended at the running per-depth index */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths,remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* ilocal/iremote ownership passes to the SF */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(0);
}
676 
677 /*@
678   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.
679 
680   Input Parameters:
681 + dm          - The DM
682 - sf          - A star forest with non-ordered leaves, usually defining a DM point migration
683 
684   Output Parameter:
685 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
686 
687   Note:
688   This lexicographically sorts by (depth, cellType)
689 
690   Level: developer
691 
692 .seealso: `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
693 @*/
694 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
695 {
696   MPI_Comm           comm;
697   PetscMPIInt        rank, size;
698   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
699   PetscSFNode       *pointDepths, *remoteDepths;
700   PetscInt          *ilocal;
701   PetscInt          *depthRecv, *depthShift, *depthIdx;
702   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
703   const PetscSFNode *iremote;
704 
705   PetscFunctionBegin;
706   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
707   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
708   PetscCallMPI(MPI_Comm_rank(comm, &rank));
709   PetscCallMPI(MPI_Comm_size(comm, &size));
710   PetscCall(DMPlexGetDepth(dm, &ldepth));
711   PetscCall(DMGetDimension(dm, &dim));
712   PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
713   PetscCheck(!(ldepth >= 0) || !(depth != ldepth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
714   PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0));
715 
716   /* Before building the migration SF we need to know the new stratum offsets */
717   PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
718   PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
719   for (d = 0; d < depth+1; ++d) {
720     PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
721     for (p = pStart; p < pEnd; ++p) {
722       DMPolytopeType ct;
723 
724       PetscCall(DMPlexGetCellType(dm, p, &ct));
725       pointDepths[p].index = d;
726       pointDepths[p].rank  = ct;
727     }
728   }
729   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
730   PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
731   PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
732   /* Count received points in each stratum and compute the internal strata shift */
733   PetscCall(PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
734   for (p = 0; p < nleaves; ++p) {
735     if (remoteDepths[p].rank < 0) {
736       ++depthRecv[remoteDepths[p].index];
737     } else {
738       ++ctRecv[remoteDepths[p].rank];
739     }
740   }
741   {
742     PetscInt depths[4], dims[4], shift = 0, i, c;
743 
744     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
745          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
746      */
747     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
748     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
749     for (i = 0; i <= depth; ++i) {
750       const PetscInt dep = depths[i];
751       const PetscInt dim = dims[i];
752 
753       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
754         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
755         ctShift[c] = shift;
756         shift     += ctRecv[c];
757       }
758       depthShift[dep] = shift;
759       shift          += depthRecv[dep];
760     }
761     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
762       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);
763 
764       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
765         ctShift[c] = shift;
766         shift     += ctRecv[c];
767       }
768     }
769   }
770   /* Derive a new local permutation based on stratified indices */
771   PetscCall(PetscMalloc1(nleaves, &ilocal));
772   for (p = 0; p < nleaves; ++p) {
773     const PetscInt       dep = remoteDepths[p].index;
774     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;
775 
776     if ((PetscInt) ct < 0) {
777       ilocal[p] = depthShift[dep] + depthIdx[dep];
778       ++depthIdx[dep];
779     } else {
780       ilocal[p] = ctShift[ct] + ctIdx[ct];
781       ++ctIdx[ct];
782     }
783   }
784   PetscCall(PetscSFCreate(comm, migrationSF));
785   PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Migration SF"));
786   PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode*)iremote, PETSC_COPY_VALUES));
787   PetscCall(PetscFree2(pointDepths,remoteDepths));
788   PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
789   PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0));
790   PetscFunctionReturn(0);
791 }
792 
793 /*@
794   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
795 
796   Collective on dm
797 
798   Input Parameters:
799 + dm - The DMPlex object
800 . pointSF - The PetscSF describing the communication pattern
801 . originalSection - The PetscSection for existing data layout
802 - originalVec - The existing data in a local vector
803 
804   Output Parameters:
805 + newSection - The PetscSF describing the new data layout
806 - newVec - The new data in a local vector
807 
808   Level: developer
809 
810 .seealso: `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
811 @*/
812 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
813 {
814   PetscSF        fieldSF;
815   PetscInt      *remoteOffsets, fieldSize;
816   PetscScalar   *originalValues, *newValues;
817 
818   PetscFunctionBegin;
819   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
820   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
821 
822   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
823   PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
824   PetscCall(VecSetType(newVec,dm->vectype));
825 
826   PetscCall(VecGetArray(originalVec, &originalValues));
827   PetscCall(VecGetArray(newVec, &newValues));
828   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
829   PetscCall(PetscFree(remoteOffsets));
830   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
831   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
832   PetscCall(PetscSFDestroy(&fieldSF));
833   PetscCall(VecRestoreArray(newVec, &newValues));
834   PetscCall(VecRestoreArray(originalVec, &originalValues));
835   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
836   PetscFunctionReturn(0);
837 }
838 
839 /*@
840   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
841 
842   Collective on dm
843 
844   Input Parameters:
845 + dm - The DMPlex object
846 . pointSF - The PetscSF describing the communication pattern
847 . originalSection - The PetscSection for existing data layout
848 - originalIS - The existing data
849 
850   Output Parameters:
851 + newSection - The PetscSF describing the new data layout
852 - newIS - The new data
853 
854   Level: developer
855 
856 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
857 @*/
858 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
859 {
860   PetscSF         fieldSF;
861   PetscInt       *newValues, *remoteOffsets, fieldSize;
862   const PetscInt *originalValues;
863 
864   PetscFunctionBegin;
865   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
866   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
867 
868   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
869   PetscCall(PetscMalloc1(fieldSize, &newValues));
870 
871   PetscCall(ISGetIndices(originalIS, &originalValues));
872   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
873   PetscCall(PetscFree(remoteOffsets));
874   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
875   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
876   PetscCall(PetscSFDestroy(&fieldSF));
877   PetscCall(ISRestoreIndices(originalIS, &originalValues));
878   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
879   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
880   PetscFunctionReturn(0);
881 }
882 
883 /*@
884   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
885 
886   Collective on dm
887 
888   Input Parameters:
889 + dm - The DMPlex object
890 . pointSF - The PetscSF describing the communication pattern
891 . originalSection - The PetscSection for existing data layout
892 . datatype - The type of data
893 - originalData - The existing data
894 
895   Output Parameters:
896 + newSection - The PetscSection describing the new data layout
897 - newData - The new data
898 
899   Level: developer
900 
901 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`
902 @*/
903 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
904 {
905   PetscSF        fieldSF;
906   PetscInt      *remoteOffsets, fieldSize;
907   PetscMPIInt    dataSize;
908 
909   PetscFunctionBegin;
910   PetscCall(PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0));
911   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
912 
913   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
914   PetscCallMPI(MPI_Type_size(datatype, &dataSize));
915   PetscCall(PetscMalloc(fieldSize * dataSize, newData));
916 
917   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
918   PetscCall(PetscFree(remoteOffsets));
919   PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
920   PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
921   PetscCall(PetscSFDestroy(&fieldSF));
922   PetscCall(PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0));
923   PetscFunctionReturn(0);
924 }
925 
/* Distribute the cones and cone orientations of dm over migrationSF into dmParallel,
   renumber the received cone points into the new local numbering, then build supports
   and strata on the parallel mesh.

   If 'original' is non-NULL it maps the sender's local cone points to global (block)
   indices before communication; 'renumbering' maps global indices back to the new
   local points afterwards. */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm               comm;
  PetscSF                coneSF;
  PetscSection           originalConeSection, newConeSection;
  PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool              flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  /* DMSetUp allocates the cone storage for dmParallel from the freshly distributed section */
  PetscCall(DMSetUp(dmParallel));
  /* Communicate and renumber cones */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    /* Translate sender-local cone points to global indices before shipping them */
    PetscInt numCones;

    PetscCall(PetscSectionGetStorageSize(originalConeSection,&numCones));
    PetscCall(PetscMalloc1(numCones,&globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    /* Cones are already in the global numbering; communicate them as-is */
    globCones = cones;
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  if (original) {
    PetscCall(PetscFree(globCones));
  }
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* Map received global cone points into the new local numbering; unfound points become negative (masked) */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    /* In debug builds, verify every cone point was found in the renumbering */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank,p));}
    }
    PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Cone orientations use the same dof-level SF; they need no renumbering */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  /* Copy the adjacency configuration over to the parallel mesh */
  {
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(0);
}
1007 
1008 static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1009 {
1010   MPI_Comm         comm;
1011   DM               cdm, cdmParallel;
1012   PetscSection     originalCoordSection, newCoordSection;
1013   Vec              originalCoordinates, newCoordinates;
1014   PetscInt         bs;
1015   PetscBool        isper;
1016   const char      *name;
1017   const PetscReal *maxCell, *L;
1018   const DMBoundaryType *bd;
1019 
1020   PetscFunctionBegin;
1021   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1022   PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);
1023 
1024   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1025   PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1026   PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1027   PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1028   if (originalCoordinates) {
1029     PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1030     PetscCall(PetscObjectGetName((PetscObject) originalCoordinates, &name));
1031     PetscCall(PetscObjectSetName((PetscObject) newCoordinates, name));
1032 
1033     PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1034     PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1035     PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1036     PetscCall(VecSetBlockSize(newCoordinates, bs));
1037     PetscCall(VecDestroy(&newCoordinates));
1038   }
1039   PetscCall(DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd));
1040   PetscCall(DMSetPeriodicity(dmParallel, isper, maxCell, L, bd));
1041   PetscCall(DMGetCoordinateDM(dm, &cdm));
1042   PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1043   PetscCall(DMCopyDisc(cdm, cdmParallel));
1044   PetscFunctionReturn(0);
1045 }
1046 
/* Distribute all labels of dm over migrationSF into dmParallel.

   The "depth" label is normally recreated by stratification and skipped; it is only
   communicated when some rank detects the user has modified it (state mismatch).
   Collective: the Bcast/Allreduce calls below must be executed by all ranks even
   when a rank has no labels of its own. */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* If the user has changed the depth label, communicate it instead */
  PetscCall(DMPlexGetDepth(dm, &depth));
  PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
  if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject) depthLabel, &depthState));
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
  if (sendDepth) {
    /* Drop the automatically created depth label so the communicated one replaces it */
    PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
    PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
  }
  /* Everyone must have either the same number of labels, or none */
  PetscCall(DMGetNumLabels(dm, &numLocalLabels));
  numLabels = numLocalLabels;
  PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      PetscCall(DMGetLabelByNum(dm, l, &label));
      /* Skip "depth" because it is recreated */
      PetscCall(PetscObjectGetName((PetscObject) label, &name));
      PetscCall(PetscStrcmp(name, "depth", &isDepth));
    }
    /* Rank 0 always has labels (it defined numLabels), so its isDepth is authoritative */
    PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
    if (isDepth && !sendDepth) continue;
    PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
      PetscCheck(!(depth >= 0) || !(gdepth != depth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        PetscCall(DMLabelHasStratum(labelNew, d, &has));
        if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
      }
    }
    PetscCall(DMAddLabel(dmParallel, labelNew));
    /* Put the output flag in the new label */
    if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
    PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
    PetscCall(PetscObjectGetName((PetscObject) labelNew, &name));
    PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
    PetscCall(DMLabelDestroy(&labelNew));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscFunctionReturn(0);
}
1118 
/* Distribute the parent/child ("tree") constraint data of dm over migrationSF into
   dmParallel: the reference tree is shared, and the parent section, parent points,
   and child ids are communicated and renumbered into the new local numbering.

   'original' (optional) maps sender-local parent points to global block indices
   before communication; 'renumbering' maps them back to new local points. */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm        comm;
  DM              refTree;
  PetscSection    origParentSection, newParentSection;
  PetscInt        *origParents, *origChildIDs;
  PetscBool       flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));

  /* Set up tree */
  PetscCall(DMPlexGetReferenceTree(dm,&refTree));
  PetscCall(DMPlexSetReferenceTree(dmParallel,refTree));
  PetscCall(DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL));
  if (origParentSection) {
    PetscInt        pStart, pEnd;
    PetscInt        *newParents, *newChildIDs, *globParents;
    PetscInt        *remoteOffsetsParents, newParentSize;
    PetscSF         parentSF;

    /* Build the parallel parent section over the new chart and a dof-level SF */
    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection));
    PetscCall(PetscSectionSetChart(newParentSection,pStart,pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
    PetscCall(PetscFree(remoteOffsetsParents));
    PetscCall(PetscSectionGetStorageSize(newParentSection,&newParentSize));
    PetscCall(PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs));
    if (original) {
      /* Translate sender-local parent points to global indices before shipping */
      PetscInt numParents;

      PetscCall(PetscSectionGetStorageSize(origParentSection,&numParents));
      PetscCall(PetscMalloc1(numParents,&globParents));
      PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
    }
    else {
      globParents = origParents;
    }
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    if (original) {
      PetscCall(PetscFree(globParents));
    }
    /* Child ids are plain integers; they need no renumbering */
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    /* Map received global parent points into the new local numbering; misses become negative (masked) */
    PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
    if (PetscDefined(USE_DEBUG)) {
      /* In debug builds, verify every parent point was found in the renumbering */
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));}
      }
      PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
    PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg));
    if (flg) {
      PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
      PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
      PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscSFView(parentSF, NULL));
    }
    PetscCall(DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs));
    PetscCall(PetscSectionDestroy(&newParentSection));
    PetscCall(PetscFree2(newParents,newChildIDs));
    PetscCall(PetscSFDestroy(&parentSF));
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}
1194 
/* Build the point SF (ghost-point ownership graph) of dmParallel from the migration SF.
   Ownership of shared points is decided by an MPI_MAXLOC reduction over (rank, index)
   pairs: among the processes bidding for a point, the highest rank wins; every other
   process records the point as a ghost referencing the winner. */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt            rank, size;
  MPI_Comm               comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL));
    PetscCall(PetscMalloc2(numRoots,&rowners,numLeaves,&lowners));
    /* Mark all roots "unclaimed" and broadcast that state to the leaves */
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    /* MAXLOC picks the highest bidding rank for each root; then broadcast the winner back */
    PetscCall(PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      PetscCheck(lowners[p].rank >= 0 && lowners[p].index >= 0,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    /* Points owned elsewhere become leaves (ghosts) of the new point SF */
    PetscCall(PetscMalloc1(numGhostPoints, &ghostPoints));
    PetscCall(PetscMalloc1(numGhostPoints, &remotePoints));
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    PetscCall(PetscFree2(rowners,lowners));
    PetscCall(PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER));
    PetscCall(PetscSFSetFromOptions((dmParallel)->sf));
  }
  /* Copy the adjacency configuration over to the parallel mesh */
  {
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0));
  PetscFunctionReturn(0);
}
1270 
1271 /*@
1272   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1273 
1274   Input Parameters:
1275 + dm - The DMPlex object
1276 - flg - Balance the partition?
1277 
1278   Level: intermediate
1279 
1280 .seealso: `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1281 @*/
1282 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1283 {
1284   DM_Plex *mesh = (DM_Plex *)dm->data;
1285 
1286   PetscFunctionBegin;
1287   mesh->partitionBalance = flg;
1288   PetscFunctionReturn(0);
1289 }
1290 
1291 /*@
1292   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1293 
1294   Input Parameter:
1295 . dm - The DMPlex object
1296 
1297   Output Parameter:
1298 . flg - Balance the partition?
1299 
1300   Level: intermediate
1301 
1302 .seealso: `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1303 @*/
1304 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1305 {
1306   DM_Plex *mesh = (DM_Plex *)dm->data;
1307 
1308   PetscFunctionBegin;
1309   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1310   PetscValidBoolPointer(flg, 2);
1311   *flg = mesh->partitionBalance;
1312   PetscFunctionReturn(0);
1313 }
1314 
1315 typedef struct {
1316   PetscInt vote, rank, index;
1317 } Petsc3Int;
1318 
1319 /* MaxLoc, but carry a third piece of information around */
1320 static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1321 {
1322   Petsc3Int *a = (Petsc3Int *)inout_;
1323   Petsc3Int *b = (Petsc3Int *)in_;
1324   PetscInt i, len = *len_;
1325   for (i = 0; i < len; i++) {
1326     if (a[i].vote < b[i].vote) {
1327       a[i].vote = b[i].vote;
1328       a[i].rank = b[i].rank;
1329       a[i].index = b[i].index;
1330     } else if (a[i].vote <= b[i].vote) {
1331       if (a[i].rank >= b[i].rank) {
1332         a[i].rank = b[i].rank;
1333         a[i].index = b[i].index;
1334       }
1335     }
1336   }
1337 }
1338 
1339 /*@C
1340   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1341 
1342   Input Parameters:
1343 + dm          - The source DMPlex object
1344 . migrationSF - The star forest that describes the parallel point remapping
1345 - ownership   - Flag causing a vote to determine point ownership
1346 
1347   Output Parameter:
1348 . pointSF     - The star forest describing the point overlap in the remapped DM
1349 
1350   Notes:
1351   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.
1352 
1353   Level: developer
1354 
1355 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1356 @*/
1357 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1358 {
1359   PetscMPIInt        rank, size;
1360   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1361   PetscInt          *pointLocal;
1362   const PetscInt    *leaves;
1363   const PetscSFNode *roots;
1364   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1365   Vec                shifts;
1366   const PetscInt     numShifts = 13759;
1367   const PetscScalar *shift = NULL;
1368   const PetscBool    shiftDebug = PETSC_FALSE;
1369   PetscBool          balance;
1370 
1371   PetscFunctionBegin;
1372   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1373   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
1374   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
1375   PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0));
1376 
1377   PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1378   PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1379   PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1380   if (ownership) {
1381     MPI_Op       op;
1382     MPI_Datatype datatype;
1383     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1384     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1385     if (balance) {
1386       PetscRandom r;
1387 
1388       PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1389       PetscCall(PetscRandomSetInterval(r, 0, 2467*size));
1390       PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1391       PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1392       PetscCall(VecSetType(shifts, VECSTANDARD));
1393       PetscCall(VecSetRandom(shifts, r));
1394       PetscCall(PetscRandomDestroy(&r));
1395       PetscCall(VecGetArrayRead(shifts, &shift));
1396     }
1397 
1398     PetscCall(PetscMalloc1(nroots, &rootVote));
1399     PetscCall(PetscMalloc1(nleaves, &leafVote));
1400     /* Point ownership vote: Process with highest rank owns shared points */
1401     for (p = 0; p < nleaves; ++p) {
1402       if (shiftDebug) {
1403         PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size));
1404       }
1405       /* Either put in a bid or we know we own it */
1406       leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
1407       leafVote[p].rank = rank;
1408       leafVote[p].index = p;
1409     }
1410     for (p = 0; p < nroots; p++) {
1411       /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1412       rootVote[p].vote  = -3;
1413       rootVote[p].rank  = -3;
1414       rootVote[p].index = -3;
1415     }
1416     PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1417     PetscCallMPI(MPI_Type_commit(&datatype));
1418     PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1419     PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1420     PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1421     PetscCallMPI(MPI_Op_free(&op));
1422     PetscCallMPI(MPI_Type_free(&datatype));
1423     for (p = 0; p < nroots; p++) {
1424       rootNodes[p].rank = rootVote[p].rank;
1425       rootNodes[p].index = rootVote[p].index;
1426     }
1427     PetscCall(PetscFree(leafVote));
1428     PetscCall(PetscFree(rootVote));
1429   } else {
1430     for (p = 0; p < nroots; p++) {
1431       rootNodes[p].index = -1;
1432       rootNodes[p].rank = rank;
1433     }
1434     for (p = 0; p < nleaves; p++) {
1435       /* Write new local id into old location */
1436       if (roots[p].rank == rank) {
1437         rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1438       }
1439     }
1440   }
1441   PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));
1442   PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));
1443 
1444   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1445     if (leafNodes[p].rank != rank) npointLeaves++;
1446   }
1447   PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1448   PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1449   for (idx = 0, p = 0; p < nleaves; p++) {
1450     if (leafNodes[p].rank != rank) {
1451       /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
1452       pointLocal[idx] = p;
1453       pointRemote[idx] = leafNodes[p];
1454       idx++;
1455     }
1456   }
1457   if (shift) {
1458     PetscCall(VecRestoreArrayRead(shifts, &shift));
1459     PetscCall(VecDestroy(&shifts));
1460   }
1461   if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT));
1462   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF));
1463   PetscCall(PetscSFSetFromOptions(*pointSF));
1464   PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1465   PetscCall(PetscFree2(rootNodes, leafNodes));
1466   PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0));
1467   if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF));
1468   PetscFunctionReturn(0);
1469 }
1470 
1471 /*@C
1472   DMPlexMigrate  - Migrates internal DM data over the supplied star forest
1473 
1474   Collective on dm
1475 
1476   Input Parameters:
1477 + dm       - The source DMPlex object
1478 - sf       - The star forest communication context describing the migration pattern
1479 
1480   Output Parameter:
1481 . targetDM - The target DMPlex object
1482 
1483   Level: intermediate
1484 
1485 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1486 @*/
1487 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1488 {
1489   MPI_Comm               comm;
1490   PetscInt               dim, cdim, nroots;
1491   PetscSF                sfPoint;
1492   ISLocalToGlobalMapping ltogMigration;
1493   ISLocalToGlobalMapping ltogOriginal = NULL;
1494 
1495   PetscFunctionBegin;
1496   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1497   PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1498   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
1499   PetscCall(DMGetDimension(dm, &dim));
1500   PetscCall(DMSetDimension(targetDM, dim));
1501   PetscCall(DMGetCoordinateDim(dm, &cdim));
1502   PetscCall(DMSetCoordinateDim(targetDM, cdim));
1503 
1504   /* Check for a one-to-all distribution pattern */
1505   PetscCall(DMGetPointSF(dm, &sfPoint));
1506   PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1507   if (nroots >= 0) {
1508     IS        isOriginal;
1509     PetscInt  n, size, nleaves;
1510     PetscInt  *numbering_orig, *numbering_new;
1511 
1512     /* Get the original point numbering */
1513     PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1514     PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1515     PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1516     PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1517     /* Convert to positive global numbers */
1518     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1519     /* Derive the new local-to-global mapping from the old one */
1520     PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1521     PetscCall(PetscMalloc1(nleaves, &numbering_new));
1522     PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1523     PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1524     PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1525     PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1526     PetscCall(ISDestroy(&isOriginal));
1527   } else {
1528     /* One-to-all distribution pattern: We can derive LToG from SF */
1529     PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1530   }
1531   PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1532   PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1533   /* Migrate DM data to target DM */
1534   PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1535   PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1536   PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1537   PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1538   PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1539   PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1540   PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1541   PetscFunctionReturn(0);
1542 }
1543 
/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Collective on dm

  Input Parameters:
+ dm  - The original DMPlex object
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution, or NULL if not needed
- dmParallel - The distributed DMPlex object

  Note: If the mesh was not distributed (e.g. the communicator has a single rank), the output dmParallel will be NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm               comm;
  PetscPartitioner       partitioner;
  IS                     cellPart;
  PetscSection           cellPartSection;
  DM                     dmCoord;
  DMLabel                lblPartition, lblMigration;
  PetscSF                sfMigration, sfStratified, sfPoint;
  PetscBool              flg, balance;
  PetscMPIInt            rank, size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf,3);
  PetscValidPointer(dmParallel,4);

  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* Nothing to distribute on a single rank; both outputs stay NULL per the manpage */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0));
  /* Create cell partition: cellPartSection maps target rank -> number of cells, cellPart lists the cells */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &cellPartSection));
  PetscCall(DMPlexGetPartitioner(dm, &partitioner));
  PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
  PetscCall(PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0));
  {
    /* Convert partition to DMLabel: one stratum per target rank, containing the closure of its cells */
    IS             is;
    PetscHSetI     ht;
    const PetscInt *points;
    PetscInt       *iranks;
    PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
    /* Preallocate strata: collect the set of ranks that actually receive cells */
    PetscCall(PetscHSetICreate(&ht));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
    }
    PetscCall(PetscHSetIGetSize(ht, &nranks));
    PetscCall(PetscMalloc1(nranks, &iranks));
    /* Note: poff doubles here as the running output position for PetscHSetIGetElems() */
    PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
    PetscCall(PetscHSetIDestroy(&ht));
    PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
    PetscCall(PetscFree(iranks));
    /* Inline DMPlexPartitionLabelClosure(): label each rank's cells with their transitive closure */
    PetscCall(ISGetIndices(cellPart, &points));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (!npoints) continue;
      PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
      PetscCall(DMPlexClosurePoints_Private(dm, npoints, points+poff, &is));
      PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
      PetscCall(ISDestroy(&is));
    }
    PetscCall(ISRestoreIndices(cellPart, &points));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0));

  /* Invert the partition (who sends to me -> what I receive), build the migration SF, and stratify it */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
  PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified;
  PetscCall(PetscSFSetUp(sfMigration));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
  if (flg) {
    PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  PetscCall(DMPlexCreate(comm, dmParallel));
  PetscCall(PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));

  /* Build the point SF without overlap */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
  PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(*dmParallel, sfPoint));
  /* The coordinate DM must share the point SF so coordinates migrate consistently */
  PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  if (flg) PetscCall(PetscSFView(sfPoint, NULL));

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves, noldleaves, l;
    const PetscInt    *oldLeaves;
    PetscSFNode       *newRemote, *permRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM */
    PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
    PetscCall(DMDestroy(dmParallel));
    *dmParallel = dmOverlap;
    if (flg) {
      PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
      PetscCall(PetscSFView(sfOverlap, NULL));
    }

    /* Re-map the migration SF to establish the full migration pattern */
    PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
    PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
    PetscCall(PetscMalloc1(nleaves, &newRemote));
    /* oldRemote: original root point mapping to original leaf point
       newRemote: original leaf point mapping to overlapped leaf point */
    if (oldLeaves) {
      /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
      PetscCall(PetscMalloc1(noldleaves, &permRemote));
      for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
      oldRemote = permRemote;
    }
    PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    /* If we reordered, oldRemote points at permRemote, which we own */
    if (oldLeaves) PetscCall(PetscFree(oldRemote));
    PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
    PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
    PetscCall(PetscSFDestroy(&sfOverlap));
    PetscCall(PetscSFDestroy(&sfMigration));
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  PetscCall(DMLabelDestroy(&lblPartition));
  PetscCall(DMLabelDestroy(&lblMigration));
  PetscCall(PetscSectionDestroy(&cellPartSection));
  PetscCall(ISDestroy(&cellPart));
  /* Copy discretization */
  PetscCall(DMCopyDisc(dm, *dmParallel));
  /* Create sfNatural */
  if (dm->useNatural) {
    PetscSection section;

    PetscCall(DMGetLocalSection(dm, &section));
    PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
    PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
  }
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
  /* Cleanup: hand the migration SF to the caller if requested, otherwise release it */
  if (sf) {*sf = sfMigration;}
  else    PetscCall(PetscSFDestroy(&sfMigration));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscCall(PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0));
  PetscFunctionReturn(0);
}
1724 
/*@C
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

  Collective on dm

  Input Parameters:
+ dm  - The non-overlapping distributed DMPlex object
- overlap - The overlap of partitions (the same on all ranks)

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmOverlap - The overlapping distributed DMPlex object, or NULL

  Notes:
  If the mesh was not distributed (single-rank communicator), the output dmOverlap is NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: advanced

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm               comm;
  PetscMPIInt            size, rank;
  PetscSection           rootSection, leafSection;
  IS                     rootrank, leafrank;
  DM                     dmCoord;
  DMLabel                lblOverlap;
  PetscSF                sfOverlap, sfStratified, sfPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  if (sf) *sf = NULL;
  *dmOverlap  = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Nothing to overlap on a single rank; outputs stay NULL per the manpage */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  /* Compute point overlap with neighbouring processes on the distributed DM */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  /* Convert overlap label to stratified migration SF */
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
  PetscCall(PetscSFDestroy(&sfOverlap));
  sfOverlap = sfStratified;
  PetscCall(PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF"));
  PetscCall(PetscSFSetFromOptions(sfOverlap));

  /* Ownership/rank information is no longer needed once the overlap label exists */
  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));

  /* Build the overlapping DM */
  PetscCall(DMPlexCreate(comm, dmOverlap));
  PetscCall(PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
  /* Store the overlap in the new DM (accumulates with the source DM's overlap) */
  PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
  /* Build the new point SF */
  PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
  /* The coordinate DM must share the point SF so coordinates stay consistent */
  PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* Cleanup overlap partition; hand the overlap SF to the caller if requested */
  PetscCall(DMLabelDestroy(&lblOverlap));
  if (sf) *sf = sfOverlap;
  else    PetscCall(PetscSFDestroy(&sfOverlap));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1811 
1812 PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1813 {
1814   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1815 
1816   PetscFunctionBegin;
1817   *overlap = mesh->overlap;
1818   PetscFunctionReturn(0);
1819 }
1820 
1821 PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
1822 {
1823   DM_Plex        *mesh=NULL;
1824   DM_Plex        *meshSrc=NULL;
1825 
1826   PetscFunctionBegin;
1827   PetscValidHeaderSpecificType(dm, DM_CLASSID, 1, DMPLEX);
1828   mesh = (DM_Plex*) dm->data;
1829   mesh->overlap = overlap;
1830   if (dmSrc) {
1831     PetscValidHeaderSpecificType(dmSrc, DM_CLASSID, 2, DMPLEX);
1832     meshSrc = (DM_Plex*) dmSrc->data;
1833     mesh->overlap += meshSrc->overlap;
1834   }
1835   PetscFunctionReturn(0);
1836 }
1837 
1838 /*@
1839   DMPlexGetOverlap - Get the DMPlex partition overlap.
1840 
1841   Not collective
1842 
1843   Input Parameter:
1844 . dm - The DM
1845 
1846   Output Parameter:
1847 . overlap - The overlap of this DM
1848 
1849   Level: intermediate
1850 
1851 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`, `DMPlexCreateOverlapLabel()`
1852 @*/
1853 PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1854 {
1855   PetscFunctionBegin;
1856   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1857   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1858   PetscFunctionReturn(0);
1859 }
1860 
1861 PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
1862 {
1863   DM_Plex *mesh = (DM_Plex *) dm->data;
1864 
1865   PetscFunctionBegin;
1866   mesh->distDefault = dist;
1867   PetscFunctionReturn(0);
1868 }
1869 
/*@
  DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default

  Logically collective

  Input Parameters:
+ dm   - The DM
- dist - Flag for distribution

  Level: intermediate

.seealso: `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveBool(dm, dist, 2);
  /* Dispatch to the implementation (DMPlexDistributeSetDefault_Plex); a no-op if the type does not provide it */
  PetscTryMethod(dm,"DMPlexDistributeSetDefault_C",(DM,PetscBool),(dm,dist));
  PetscFunctionReturn(0);
}
1891 
1892 PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
1893 {
1894   DM_Plex *mesh = (DM_Plex *) dm->data;
1895 
1896   PetscFunctionBegin;
1897   *dist = mesh->distDefault;
1898   PetscFunctionReturn(0);
1899 }
1900 
/*@
  DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default

  Not collective

  Input Parameter:
. dm   - The DM

  Output Parameter:
. dist - Flag for distribution

  Level: intermediate

.seealso: `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(dist, 2);
  /* Dispatch to the implementation (DMPlexDistributeGetDefault_Plex); errors if the type does not provide it */
  PetscUseMethod(dm,"DMPlexDistributeGetDefault_C",(DM,PetscBool*),(dm,dist));
  PetscFunctionReturn(0);
}
1924 
1925 /*@C
1926   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1927   root process of the original's communicator.
1928 
1929   Collective on dm
1930 
1931   Input Parameter:
1932 . dm - the original DMPlex object
1933 
1934   Output Parameters:
1935 + sf - the PetscSF used for point distribution (optional)
1936 - gatherMesh - the gathered DM object, or NULL
1937 
1938   Level: intermediate
1939 
1940 .seealso: `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
1941 @*/
1942 PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1943 {
1944   MPI_Comm       comm;
1945   PetscMPIInt    size;
1946   PetscPartitioner oldPart, gatherPart;
1947 
1948   PetscFunctionBegin;
1949   PetscValidHeaderSpecific(dm,DM_CLASSID,1);
1950   PetscValidPointer(gatherMesh,3);
1951   *gatherMesh = NULL;
1952   if (sf) *sf = NULL;
1953   comm = PetscObjectComm((PetscObject)dm);
1954   PetscCallMPI(MPI_Comm_size(comm,&size));
1955   if (size == 1) PetscFunctionReturn(0);
1956   PetscCall(DMPlexGetPartitioner(dm,&oldPart));
1957   PetscCall(PetscObjectReference((PetscObject)oldPart));
1958   PetscCall(PetscPartitionerCreate(comm,&gatherPart));
1959   PetscCall(PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER));
1960   PetscCall(DMPlexSetPartitioner(dm,gatherPart));
1961   PetscCall(DMPlexDistribute(dm,0,sf,gatherMesh));
1962 
1963   PetscCall(DMPlexSetPartitioner(dm,oldPart));
1964   PetscCall(PetscPartitionerDestroy(&gatherPart));
1965   PetscCall(PetscPartitionerDestroy(&oldPart));
1966   PetscFunctionReturn(0);
1967 }
1968 
/*@C
  DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

  Collective on dm

  Input Parameter:
. dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- redundantMesh - the redundant DM object, or NULL

  Level: intermediate

.seealso: `DMPlexDistribute()`, `DMPlexGetGatherDM()`
@*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscInt       pStart, pEnd, p;
  PetscInt       numPoints = -1;
  PetscSF        migrationSF, sfPoint, gatherSF;
  DM             gatherDM, dmCoord;
  PetscSFNode    *points;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(redundantMesh,3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm,&size));
  /* On a single process the mesh is already redundant: return the same DM (with a new reference) */
  if (size == 1) {
    PetscCall(PetscObjectReference((PetscObject) dm));
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(0);
  }
  /* First gather the entire mesh onto rank 0 */
  PetscCall(DMPlexGetGatherDM(dm,&gatherSF,&gatherDM));
  if (!gatherDM) PetscFunctionReturn(0);
  PetscCallMPI(MPI_Comm_rank(comm,&rank));
  PetscCall(DMPlexGetChart(gatherDM,&pStart,&pEnd));
  numPoints = pEnd - pStart;
  /* Broadcast rank 0's point count so every rank builds an identical leaf list */
  PetscCallMPI(MPI_Bcast(&numPoints,1,MPIU_INT,0,comm));
  PetscCall(PetscMalloc1(numPoints,&points));
  PetscCall(PetscSFCreate(comm,&migrationSF));
  /* Every rank receives all points from rank 0 */
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank  = 0;
  }
  PetscCall(PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER));
  PetscCall(DMPlexCreate(comm, redundantMesh));
  PetscCall(PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh"));
  PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
  /* This is to express that all points are in the overlap */
  PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT));
  PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
  /* The coordinate DM must share the point SF so coordinates stay consistent */
  PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  if (sf) {
    PetscSF tsf;

    /* Compose gather SF with copy SF to obtain the overall original->redundant distribution SF */
    PetscCall(PetscSFCompose(gatherSF,migrationSF,&tsf));
    PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
    PetscCall(PetscSFDestroy(&tsf));
  }
  PetscCall(PetscSFDestroy(&migrationSF));
  PetscCall(PetscSFDestroy(&gatherSF));
  PetscCall(DMDestroy(&gatherDM));
  PetscCall(DMCopyDisc(dm, *redundantMesh));
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
  PetscFunctionReturn(0);
}
2044 
2045 /*@
2046   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.
2047 
2048   Collective
2049 
2050   Input Parameter:
2051 . dm      - The DM object
2052 
2053   Output Parameter:
2054 . distributed - Flag whether the DM is distributed
2055 
2056   Level: intermediate
2057 
2058   Notes:
2059   This currently finds out whether at least two ranks have any DAG points.
2060   This involves MPI_Allreduce() with one integer.
2061   The result is currently not stashed so every call to this routine involves this global communication.
2062 
2063 .seealso: `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2064 @*/
2065 PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2066 {
2067   PetscInt          pStart, pEnd, count;
2068   MPI_Comm          comm;
2069   PetscMPIInt       size;
2070 
2071   PetscFunctionBegin;
2072   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2073   PetscValidBoolPointer(distributed,2);
2074   PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
2075   PetscCallMPI(MPI_Comm_size(comm,&size));
2076   if (size == 1) { *distributed = PETSC_FALSE; PetscFunctionReturn(0); }
2077   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2078   count = (pEnd - pStart) > 0 ? 1 : 0;
2079   PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2080   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2081   PetscFunctionReturn(0);
2082 }
2083