xref: /petsc/src/dm/impls/plex/plexdistribute.c (revision ffa8c5705e8ab2cf85ee1d14dbe507a6e2eb5283)
1 #include <petsc/private/dmpleximpl.h>    /*I      "petscdmplex.h"   I*/
2 #include <petsc/private/dmlabelimpl.h>   /*I      "petscdmlabel.h"  I*/
3 
4 /*@C
5   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
6 
7   Input Parameters:
8 + dm      - The DM object
9 . user    - The user callback, may be NULL (to clear the callback)
10 - ctx     - context for callback evaluation, may be NULL
11 
12   Level: advanced
13 
14   Notes:
15      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
16 
17      Any setting here overrides other configuration of DMPlex adjacency determination.
18 
19 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20 @*/
21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22 {
23   DM_Plex *mesh = (DM_Plex *)dm->data;
24 
25   PetscFunctionBegin;
26   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
27   mesh->useradjacency = user;
28   mesh->useradjacencyctx = ctx;
29   PetscFunctionReturn(0);
30 }
31 
32 /*@C
33   DMPlexGetAdjacencyUser - get the user-defined adjacency callback
34 
35   Input Parameter:
36 . dm      - The DM object
37 
38   Output Parameters:
39 + user    - The user callback
40 - ctx     - context for callback evaluation
41 
42   Level: advanced
43 
44 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45 @*/
46 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47 {
48   DM_Plex *mesh = (DM_Plex *)dm->data;
49 
50   PetscFunctionBegin;
51   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
52   if (user) *user = mesh->useradjacency;
53   if (ctx) *ctx = mesh->useradjacencyctx;
54   PetscFunctionReturn(0);
55 }
56 
57 /*@
58   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
59 
60   Input Parameters:
61 + dm      - The DM object
62 - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
63 
64   Level: intermediate
65 
66 .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
67 @*/
68 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69 {
70   DM_Plex *mesh = (DM_Plex *) dm->data;
71 
72   PetscFunctionBegin;
73   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
74   mesh->useAnchors = useAnchors;
75   PetscFunctionReturn(0);
76 }
77 
78 /*@
79   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
80 
81   Input Parameter:
82 . dm      - The DM object
83 
84   Output Parameter:
85 . useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
86 
87   Level: intermediate
88 
89 .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
90 @*/
91 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92 {
93   DM_Plex *mesh = (DM_Plex *) dm->data;
94 
95   PetscFunctionBegin;
96   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
97   PetscValidBoolPointer(useAnchors, 2);
98   *useAnchors = mesh->useAnchors;
99   PetscFunctionReturn(0);
100 }
101 
102 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103 {
104   const PetscInt *cone = NULL;
105   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106 
107   PetscFunctionBeginHot;
108   PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109   PetscCall(DMPlexGetCone(dm, p, &cone));
110   for (c = 0; c <= coneSize; ++c) {
111     const PetscInt  point   = !c ? p : cone[c-1];
112     const PetscInt *support = NULL;
113     PetscInt        supportSize, s, q;
114 
115     PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116     PetscCall(DMPlexGetSupport(dm, point, &support));
117     for (s = 0; s < supportSize; ++s) {
118       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
119         if (support[s] == adj[q]) break;
120       }
121       PetscCheckFalse(numAdj > maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
122     }
123   }
124   *adjSize = numAdj;
125   PetscFunctionReturn(0);
126 }
127 
128 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129 {
130   const PetscInt *support = NULL;
131   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
132 
133   PetscFunctionBeginHot;
134   PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135   PetscCall(DMPlexGetSupport(dm, p, &support));
136   for (s = 0; s <= supportSize; ++s) {
137     const PetscInt  point = !s ? p : support[s-1];
138     const PetscInt *cone  = NULL;
139     PetscInt        coneSize, c, q;
140 
141     PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142     PetscCall(DMPlexGetCone(dm, point, &cone));
143     for (c = 0; c < coneSize; ++c) {
144       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
145         if (cone[c] == adj[q]) break;
146       }
147       PetscCheckFalse(numAdj > maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
148     }
149   }
150   *adjSize = numAdj;
151   PetscFunctionReturn(0);
152 }
153 
154 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155 {
156   PetscInt      *star = NULL;
157   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
158 
159   PetscFunctionBeginHot;
160   PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161   for (s = 0; s < starSize*2; s += 2) {
162     const PetscInt *closure = NULL;
163     PetscInt        closureSize, c, q;
164 
165     PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
166     for (c = 0; c < closureSize*2; c += 2) {
167       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
168         if (closure[c] == adj[q]) break;
169       }
170       PetscCheckFalse(numAdj > maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
171     }
172     PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
173   }
174   PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175   *adjSize = numAdj;
176   PetscFunctionReturn(0);
177 }
178 
/* Compute the points adjacent to p, dispatching on the adjacency definition
   (user callback > transitive closure > cone > support), optionally replacing
   constrained points by their anchors.
   On input *adjSize is the capacity of *adj (or negative to use the internal
   worst-case estimate); on output it is the number of adjacent points.
   If *adj is NULL, an array sized for the worst case is allocated and the
   caller owns it (PetscFree). */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): static cache of the most recent worst-case size; a call with a
     caller-provided *adj and *adjSize < 0 reuses the value from a previous call,
     which assumes that call was for the same mesh -- confirm */
  static PetscInt asiz = 0;
  PetscInt maxAnchors = 1;
  PetscInt aStart = -1, aEnd = -1;
  PetscInt maxAdjSize;
  PetscSection aSec = NULL;
  IS aIS = NULL;
  const PetscInt *anchors;
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm,&aSec,&aIS));
    if (aSec) {
      /* maxAnchors scales the worst-case allocation below, since each
         constrained point may be replaced by several anchors */
      PetscCall(PetscSectionGetMaxDof(aSec,&maxAnchors));
      maxAnchors = PetscMax(1,maxAnchors);
      PetscCall(PetscSectionGetChart(aSec,&aStart,&aEnd));
      PetscCall(ISGetIndices(aIS,&anchors));
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    PetscCall(DMPlexGetChart(dm, &pStart,&pEnd));
    PetscCall(DMPlexGetDepth(dm, &depth));
    depth = PetscMax(depth, -depth);
    PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
    /* Geometric-series bounds on the sizes of a full closure/star traversal:
       sum_{i=0}^{depth} maxC^i and likewise for maxS */
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    /* The adjacency can never exceed the total number of points in the chart */
    asiz  = PetscMin(asiz,pEnd-pStart);
    PetscCall(PetscMalloc1(asiz,adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* Dispatch: a user callback overrides any built-in adjacency definition */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    /* Replace each constrained point in the adjacency list by its anchor points,
       compacting the array in place and deduplicating the inserted anchors */
    PetscInt origSize = *adjSize;
    PetscInt numAdj = origSize;
    PetscInt i = 0, j;
    PetscInt *orig = *adj;

    while (i < origSize) {
      PetscInt p = orig[i];
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        PetscCall(PetscSectionGetDof(aSec,p,&aDof));
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Remove the constrained point by shifting the tail left by one */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec,p,&aOff));
        /* Append each anchor unless it already appears in the list */
        for (s = 0; s < aDof; ++s) {
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          PetscCheckFalse(numAdj > maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
        }
      }
      else {
        /* Unconstrained point: keep it and advance */
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS,&anchors));
  }
  PetscFunctionReturn(0);
}
264 
265 /*@
266   DMPlexGetAdjacency - Return all points adjacent to the given point
267 
268   Input Parameters:
269 + dm - The DM object
270 - p  - The point
271 
272   Input/Output Parameters:
273 + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
274             on output the number of adjacent points
275 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
276         on output contains the adjacent points
277 
278   Level: advanced
279 
280   Notes:
281     The user must PetscFree the adj array if it was not passed in.
282 
283 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
284 @*/
285 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
286 {
287   PetscBool      useCone, useClosure, useAnchors;
288 
289   PetscFunctionBeginHot;
290   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
291   PetscValidIntPointer(adjSize,3);
292   PetscValidPointer(adj,4);
293   PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
294   PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
295   PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
296   PetscFunctionReturn(0);
297 }
298 
299 /*@
300   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity
301 
302   Collective on dm
303 
304   Input Parameters:
305 + dm      - The DM
306 . sfPoint - The PetscSF which encodes point connectivity
307 . rootRankSection -
308 . rootRanks -
309 . leftRankSection -
310 - leafRanks -
311 
312   Output Parameters:
313 + processRanks - A list of process neighbors, or NULL
314 - sfProcess    - An SF encoding the two-sided process connectivity, or NULL
315 
316   Level: developer
317 
318 .seealso: PetscSFCreate(), DMPlexCreateProcessSF()
319 @*/
320 PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
321 {
322   const PetscSFNode *remotePoints;
323   PetscInt          *localPointsNew;
324   PetscSFNode       *remotePointsNew;
325   const PetscInt    *nranks;
326   PetscInt          *ranksNew;
327   PetscBT            neighbors;
328   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
329   PetscMPIInt        size, proc, rank;
330 
331   PetscFunctionBegin;
332   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
333   PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
334   if (processRanks) {PetscValidPointer(processRanks, 7);}
335   if (sfProcess)    {PetscValidPointer(sfProcess, 8);}
336   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
337   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
338   PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
339   PetscCall(PetscBTCreate(size, &neighbors));
340   PetscCall(PetscBTMemzero(size, neighbors));
341   /* Compute root-to-leaf process connectivity */
342   PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
343   PetscCall(ISGetIndices(rootRanks, &nranks));
344   for (p = pStart; p < pEnd; ++p) {
345     PetscInt ndof, noff, n;
346 
347     PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
348     PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
349     for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
350   }
351   PetscCall(ISRestoreIndices(rootRanks, &nranks));
352   /* Compute leaf-to-neighbor process connectivity */
353   PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
354   PetscCall(ISGetIndices(leafRanks, &nranks));
355   for (p = pStart; p < pEnd; ++p) {
356     PetscInt ndof, noff, n;
357 
358     PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
359     PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
360     for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
361   }
362   PetscCall(ISRestoreIndices(leafRanks, &nranks));
363   /* Compute leaf-to-root process connectivity */
364   for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
365   /* Calculate edges */
366   PetscBTClear(neighbors, rank);
367   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
368   PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
369   PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
370   PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
371   for (proc = 0, n = 0; proc < size; ++proc) {
372     if (PetscBTLookup(neighbors, proc)) {
373       ranksNew[n]              = proc;
374       localPointsNew[n]        = proc;
375       remotePointsNew[n].index = rank;
376       remotePointsNew[n].rank  = proc;
377       ++n;
378     }
379   }
380   PetscCall(PetscBTDestroy(&neighbors));
381   if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
382   else              PetscCall(PetscFree(ranksNew));
383   if (sfProcess) {
384     PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
385     PetscCall(PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF"));
386     PetscCall(PetscSFSetFromOptions(*sfProcess));
387     PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
388   }
389   PetscFunctionReturn(0);
390 }
391 
/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on dm

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank    - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank    - The rank of each process sharing a leaf point

  Level: developer

.seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMGetPointSF(dm, &sfPoint));
  /* Compute number of leaves for each root */
  PetscCall(PetscObjectSetName((PetscObject) rootSection, "Root Section"));
  PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
  PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
  PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
  /* rootdegree is indexed from 0, while the section chart starts at pStart */
  for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]));
  PetscCall(PetscSectionSetUp(rootSection));
  /* Gather rank of each leaf to root */
  PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
  PetscCall(PetscMalloc1(pEnd-pStart, &myrank));
  PetscCall(PetscMalloc1(nedges,  &remoterank));
  /* Every leaf contributes this process's rank; the gather concatenates them at each root */
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscFree(myrank));
  /* remoterank ownership passes to the IS */
  PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
  /* Distribute remote ranks to leaves */
  PetscCall(PetscObjectSetName((PetscObject) leafSection, "Leaf Section"));
  PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
  PetscFunctionReturn(0);
}
445 
446 /*@C
447   DMPlexCreateOverlapLabel - Compute owner information for shared points. This basically gets two-sided for an SF.
448 
449   Collective on dm
450 
451   Input Parameters:
452 + dm          - The DM
453 . levels      - Number of overlap levels
454 . rootSection - The number of leaves for a given root point
455 . rootrank    - The rank of each edge into the root point
456 . leafSection - The number of processes sharing a given leaf point
457 - leafrank    - The rank of each process sharing a leaf point
458 
459   Output Parameter:
460 . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
461 
462   Level: developer
463 
464 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
465 @*/
466 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
467 {
468   MPI_Comm           comm;
469   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
470   PetscSF            sfPoint;
471   const PetscSFNode *remote;
472   const PetscInt    *local;
473   const PetscInt    *nrank, *rrank;
474   PetscInt          *adj = NULL;
475   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
476   PetscMPIInt        rank, size;
477   PetscBool          flg;
478 
479   PetscFunctionBegin;
480   *ovLabel = NULL;
481   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
482   PetscCallMPI(MPI_Comm_size(comm, &size));
483   PetscCallMPI(MPI_Comm_rank(comm, &rank));
484   if (size == 1) PetscFunctionReturn(0);
485   PetscCall(DMGetPointSF(dm, &sfPoint));
486   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
487   if (!levels) {
488     /* Add owned points */
489     PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
490     for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
491     PetscFunctionReturn(0);
492   }
493   PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
494   PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
495   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
496   /* Handle leaves: shared with the root point */
497   for (l = 0; l < nleaves; ++l) {
498     PetscInt adjSize = PETSC_DETERMINE, a;
499 
500     PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
501     for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
502   }
503   PetscCall(ISGetIndices(rootrank, &rrank));
504   PetscCall(ISGetIndices(leafrank, &nrank));
505   /* Handle roots */
506   for (p = pStart; p < pEnd; ++p) {
507     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
508 
509     if ((p >= sStart) && (p < sEnd)) {
510       /* Some leaves share a root with other leaves on different processes */
511       PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
512       if (neighbors) {
513         PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
514         PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
515         for (n = 0; n < neighbors; ++n) {
516           const PetscInt remoteRank = nrank[noff+n];
517 
518           if (remoteRank == rank) continue;
519           for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
520         }
521       }
522     }
523     /* Roots are shared with leaves */
524     PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
525     if (!neighbors) continue;
526     PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
527     PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
528     for (n = 0; n < neighbors; ++n) {
529       const PetscInt remoteRank = rrank[noff+n];
530 
531       if (remoteRank == rank) continue;
532       for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
533     }
534   }
535   PetscCall(PetscFree(adj));
536   PetscCall(ISRestoreIndices(rootrank, &rrank));
537   PetscCall(ISRestoreIndices(leafrank, &nrank));
538   /* Add additional overlap levels */
539   for (l = 1; l < levels; l++) {
540     /* Propagate point donations over SF to capture remote connections */
541     PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
542     /* Add next level of point donations to the label */
543     PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
544   }
545   /* We require the closure in the overlap */
546   PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
547   PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg));
548   if (flg) {
549     PetscViewer viewer;
550     PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
551     PetscCall(DMLabelView(ovAdjByRank, viewer));
552   }
553   /* Invert sender to receiver label */
554   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
555   PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
556   /* Add owned points, except for shared local points */
557   for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
558   for (l = 0; l < nleaves; ++l) {
559     PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
560     PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
561   }
562   /* Clean up */
563   PetscCall(DMLabelDestroy(&ovAdjByRank));
564   PetscFunctionReturn(0);
565 }
566 
/*@C
  DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

  Collective on dm

  Input Parameters:
+ dm          - The DM
- overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - An SF that maps original points in old locations to points in new locations

  Level: developer

.seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  /* Label each local point with its depth, then broadcast so each incoming
     overlap leaf learns the depth of its remote owner point */
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx));
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  /* NOTE(review): the shifts below appear to interleave received strata in the
     order cells, vertices, then intermediate depths; the exact layout follows
     DMPlex stratum numbering conventions -- verify against DMPlexStratify */
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    /* depthIdx[d] is the running insertion cursor for new points of depth d */
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves;
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        /* Shared points keep their current remote owner rather than self */
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points */
  for (p=0; p<nleaves; p++) {
    /* Place each received point at the cursor for its stratum, then advance */
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths,remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* ilocal/iremote ownership passes to the SF */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(0);
}
676 
677 /*@
678   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.
679 
680   Input Parameters:
681 + dm          - The DM
682 - sf          - A star forest with non-ordered leaves, usually defining a DM point migration
683 
684   Output Parameter:
685 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
686 
687   Note:
688   This lexicographically sorts by (depth, cellType)
689 
690   Level: developer
691 
692 .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
693 @*/
694 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
695 {
696   MPI_Comm           comm;
697   PetscMPIInt        rank, size;
698   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
699   PetscSFNode       *pointDepths, *remoteDepths;
700   PetscInt          *ilocal;
701   PetscInt          *depthRecv, *depthShift, *depthIdx;
702   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
703   const PetscSFNode *iremote;
704 
705   PetscFunctionBegin;
706   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
707   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
708   PetscCallMPI(MPI_Comm_rank(comm, &rank));
709   PetscCallMPI(MPI_Comm_size(comm, &size));
710   PetscCall(DMPlexGetDepth(dm, &ldepth));
711   PetscCall(DMGetDimension(dm, &dim));
712   PetscCallMPI(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
713   PetscCheckFalse((ldepth >= 0) && (depth != ldepth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
714   PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0));
715 
716   /* Before building the migration SF we need to know the new stratum offsets */
717   PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
718   PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
719   for (d = 0; d < depth+1; ++d) {
720     PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
721     for (p = pStart; p < pEnd; ++p) {
722       DMPolytopeType ct;
723 
724       PetscCall(DMPlexGetCellType(dm, p, &ct));
725       pointDepths[p].index = d;
726       pointDepths[p].rank  = ct;
727     }
728   }
729   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
730   PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
731   PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
732   /* Count received points in each stratum and compute the internal strata shift */
733   PetscCall(PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
734   for (p = 0; p < nleaves; ++p) {
735     if (remoteDepths[p].rank < 0) {
736       ++depthRecv[remoteDepths[p].index];
737     } else {
738       ++ctRecv[remoteDepths[p].rank];
739     }
740   }
741   {
742     PetscInt depths[4], dims[4], shift = 0, i, c;
743 
744     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
745          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
746      */
747     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
748     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
749     for (i = 0; i <= depth; ++i) {
750       const PetscInt dep = depths[i];
751       const PetscInt dim = dims[i];
752 
753       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
754         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
755         ctShift[c] = shift;
756         shift     += ctRecv[c];
757       }
758       depthShift[dep] = shift;
759       shift          += depthRecv[dep];
760     }
761     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
762       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);
763 
764       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
765         ctShift[c] = shift;
766         shift     += ctRecv[c];
767       }
768     }
769   }
770   /* Derive a new local permutation based on stratified indices */
771   PetscCall(PetscMalloc1(nleaves, &ilocal));
772   for (p = 0; p < nleaves; ++p) {
773     const PetscInt       dep = remoteDepths[p].index;
774     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;
775 
776     if ((PetscInt) ct < 0) {
777       ilocal[p] = depthShift[dep] + depthIdx[dep];
778       ++depthIdx[dep];
779     } else {
780       ilocal[p] = ctShift[ct] + ctIdx[ct];
781       ++ctIdx[ct];
782     }
783   }
784   PetscCall(PetscSFCreate(comm, migrationSF));
785   PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Migration SF"));
786   PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode*)iremote, PETSC_COPY_VALUES));
787   PetscCall(PetscFree2(pointDepths,remoteDepths));
788   PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
789   PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0));
790   PetscFunctionReturn(0);
791 }
792 
793 /*@
794   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
795 
796   Collective on dm
797 
798   Input Parameters:
799 + dm - The DMPlex object
800 . pointSF - The PetscSF describing the communication pattern
801 . originalSection - The PetscSection for existing data layout
802 - originalVec - The existing data in a local vector
803 
804   Output Parameters:
805 + newSection - The PetscSF describing the new data layout
806 - newVec - The new data in a local vector
807 
808   Level: developer
809 
810 .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
811 @*/
812 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
813 {
814   PetscSF        fieldSF;
815   PetscInt      *remoteOffsets, fieldSize;
816   PetscScalar   *originalValues, *newValues;
817 
818   PetscFunctionBegin;
819   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
820   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
821 
822   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
823   PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
824   PetscCall(VecSetType(newVec,dm->vectype));
825 
826   PetscCall(VecGetArray(originalVec, &originalValues));
827   PetscCall(VecGetArray(newVec, &newValues));
828   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
829   PetscCall(PetscFree(remoteOffsets));
830   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
831   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
832   PetscCall(PetscSFDestroy(&fieldSF));
833   PetscCall(VecRestoreArray(newVec, &newValues));
834   PetscCall(VecRestoreArray(originalVec, &originalValues));
835   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
836   PetscFunctionReturn(0);
837 }
838 
839 /*@
840   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
841 
842   Collective on dm
843 
844   Input Parameters:
845 + dm - The DMPlex object
846 . pointSF - The PetscSF describing the communication pattern
847 . originalSection - The PetscSection for existing data layout
848 - originalIS - The existing data
849 
850   Output Parameters:
851 + newSection - The PetscSF describing the new data layout
852 - newIS - The new data
853 
854   Level: developer
855 
856 .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
857 @*/
858 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
859 {
860   PetscSF         fieldSF;
861   PetscInt       *newValues, *remoteOffsets, fieldSize;
862   const PetscInt *originalValues;
863 
864   PetscFunctionBegin;
865   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
866   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
867 
868   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
869   PetscCall(PetscMalloc1(fieldSize, &newValues));
870 
871   PetscCall(ISGetIndices(originalIS, &originalValues));
872   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
873   PetscCall(PetscFree(remoteOffsets));
874   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
875   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
876   PetscCall(PetscSFDestroy(&fieldSF));
877   PetscCall(ISRestoreIndices(originalIS, &originalValues));
878   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
879   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
880   PetscFunctionReturn(0);
881 }
882 
883 /*@
884   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
885 
886   Collective on dm
887 
888   Input Parameters:
889 + dm - The DMPlex object
890 . pointSF - The PetscSF describing the communication pattern
891 . originalSection - The PetscSection for existing data layout
892 . datatype - The type of data
893 - originalData - The existing data
894 
895   Output Parameters:
896 + newSection - The PetscSection describing the new data layout
897 - newData - The new data
898 
899   Level: developer
900 
901 .seealso: DMPlexDistribute(), DMPlexDistributeField()
902 @*/
903 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
904 {
905   PetscSF        fieldSF;
906   PetscInt      *remoteOffsets, fieldSize;
907   PetscMPIInt    dataSize;
908 
909   PetscFunctionBegin;
910   PetscCall(PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0));
911   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
912 
913   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
914   PetscCallMPI(MPI_Type_size(datatype, &dataSize));
915   PetscCall(PetscMalloc(fieldSize * dataSize, newData));
916 
917   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
918   PetscCall(PetscFree(remoteOffsets));
919   PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
920   PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
921   PetscCall(PetscSFDestroy(&fieldSF));
922   PetscCall(PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0));
923   PetscFunctionReturn(0);
924 }
925 
/* Distribute the cone section, cone arrays, and cone orientations of dm onto dmParallel
   along migrationSF, renumbering cone entries into the new local point numbering, then
   symmetrize (build supports) and stratify the parallel mesh.

   original    - optional local-to-global map for source points; if NULL the local cone
                 values are assumed to already be global block indices
   renumbering - global-to-local map defining the receiving mesh's local numbering */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm               comm;
  PetscSF                coneSF;
  PetscSection           originalConeSection, newConeSection;
  PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool              flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  PetscCall(DMSetUp(dmParallel));
  {
    /* Recompute the receiving mesh's max cone size from the migrated section */
    PetscInt pStart, pEnd, p;

    PetscCall(PetscSectionGetChart(newConeSection, &pStart, &pEnd));
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      PetscCall(PetscSectionGetDof(newConeSection, p, &coneSize));
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    /* Translate local cone points to global block indices before sending */
    PetscInt numCones;

    PetscCall(PetscSectionGetStorageSize(originalConeSection,&numCones));
    PetscCall(PetscMalloc1(numCones,&globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    globCones = cones;
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  if (original) {
    PetscCall(PetscFree(globCones));
  }
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* Map received global cone entries into the new local numbering in place;
     IS_GTOLM_MASK marks unmapped points with -1, which the debug check below detects */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p));}
    }
    PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Orientations use the same layout as the cones, so reuse coneSF */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  {
    /* Copy the adjacency configuration onto the parallel mesh */
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(0);
}
1017 
1018 static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1019 {
1020   MPI_Comm         comm;
1021   DM               cdm, cdmParallel;
1022   PetscSection     originalCoordSection, newCoordSection;
1023   Vec              originalCoordinates, newCoordinates;
1024   PetscInt         bs;
1025   PetscBool        isper;
1026   const char      *name;
1027   const PetscReal *maxCell, *L;
1028   const DMBoundaryType *bd;
1029 
1030   PetscFunctionBegin;
1031   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1032   PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);
1033 
1034   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1035   PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1036   PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1037   PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1038   if (originalCoordinates) {
1039     PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1040     PetscCall(PetscObjectGetName((PetscObject) originalCoordinates, &name));
1041     PetscCall(PetscObjectSetName((PetscObject) newCoordinates, name));
1042 
1043     PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1044     PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1045     PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1046     PetscCall(VecSetBlockSize(newCoordinates, bs));
1047     PetscCall(VecDestroy(&newCoordinates));
1048   }
1049   PetscCall(DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd));
1050   PetscCall(DMSetPeriodicity(dmParallel, isper, maxCell, L, bd));
1051   PetscCall(DMGetCoordinateDM(dm, &cdm));
1052   PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1053   PetscCall(DMCopyDisc(cdm, cdmParallel));
1054   PetscFunctionReturn(0);
1055 }
1056 
1057 static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1058 {
1059   DM_Plex         *mesh = (DM_Plex*) dm->data;
1060   MPI_Comm         comm;
1061   DMLabel          depthLabel;
1062   PetscMPIInt      rank;
1063   PetscInt         depth, d, numLabels, numLocalLabels, l;
1064   PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
1065   PetscObjectState depthState = -1;
1066 
1067   PetscFunctionBegin;
1068   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1069   PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);
1070 
1071   PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0));
1072   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1073   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1074 
1075   /* If the user has changed the depth label, communicate it instead */
1076   PetscCall(DMPlexGetDepth(dm, &depth));
1077   PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
1078   if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject) depthLabel, &depthState));
1079   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1080   PetscCallMPI(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
1081   if (sendDepth) {
1082     PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
1083     PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
1084   }
1085   /* Everyone must have either the same number of labels, or none */
1086   PetscCall(DMGetNumLabels(dm, &numLocalLabels));
1087   numLabels = numLocalLabels;
1088   PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
1089   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1090   for (l = 0; l < numLabels; ++l) {
1091     DMLabel     label = NULL, labelNew = NULL;
1092     PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
1093     const char *name = NULL;
1094 
1095     if (hasLabels) {
1096       PetscCall(DMGetLabelByNum(dm, l, &label));
1097       /* Skip "depth" because it is recreated */
1098       PetscCall(PetscObjectGetName((PetscObject) label, &name));
1099       PetscCall(PetscStrcmp(name, "depth", &isDepth));
1100     }
1101     PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
1102     if (isDepth && !sendDepth) continue;
1103     PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
1104     if (isDepth) {
1105       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1106       PetscInt gdepth;
1107 
1108       PetscCallMPI(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
1109       PetscCheckFalse((depth >= 0) && (gdepth != depth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
1110       for (d = 0; d <= gdepth; ++d) {
1111         PetscBool has;
1112 
1113         PetscCall(DMLabelHasStratum(labelNew, d, &has));
1114         if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
1115       }
1116     }
1117     PetscCall(DMAddLabel(dmParallel, labelNew));
1118     /* Put the output flag in the new label */
1119     if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
1120     PetscCallMPI(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
1121     PetscCall(PetscObjectGetName((PetscObject) labelNew, &name));
1122     PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
1123     PetscCall(DMLabelDestroy(&labelNew));
1124   }
1125   PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0));
1126   PetscFunctionReturn(0);
1127 }
1128 
1129 static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1130 {
1131   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1132   DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
1133   MPI_Comm        comm;
1134   DM              refTree;
1135   PetscSection    origParentSection, newParentSection;
1136   PetscInt        *origParents, *origChildIDs;
1137   PetscBool       flg;
1138 
1139   PetscFunctionBegin;
1140   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1141   PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
1142   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1143 
1144   /* Set up tree */
1145   PetscCall(DMPlexGetReferenceTree(dm,&refTree));
1146   PetscCall(DMPlexSetReferenceTree(dmParallel,refTree));
1147   PetscCall(DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL));
1148   if (origParentSection) {
1149     PetscInt        pStart, pEnd;
1150     PetscInt        *newParents, *newChildIDs, *globParents;
1151     PetscInt        *remoteOffsetsParents, newParentSize;
1152     PetscSF         parentSF;
1153 
1154     PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
1155     PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection));
1156     PetscCall(PetscSectionSetChart(newParentSection,pStart,pEnd));
1157     PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
1158     PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
1159     PetscCall(PetscFree(remoteOffsetsParents));
1160     PetscCall(PetscSectionGetStorageSize(newParentSection,&newParentSize));
1161     PetscCall(PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs));
1162     if (original) {
1163       PetscInt numParents;
1164 
1165       PetscCall(PetscSectionGetStorageSize(origParentSection,&numParents));
1166       PetscCall(PetscMalloc1(numParents,&globParents));
1167       PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
1168     }
1169     else {
1170       globParents = origParents;
1171     }
1172     PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
1173     PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
1174     if (original) {
1175       PetscCall(PetscFree(globParents));
1176     }
1177     PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
1178     PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
1179     PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
1180     if (PetscDefined(USE_DEBUG)) {
1181       PetscInt  p;
1182       PetscBool valid = PETSC_TRUE;
1183       for (p = 0; p < newParentSize; ++p) {
1184         if (newParents[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p));}
1185       }
1186       PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
1187     }
1188     PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg));
1189     if (flg) {
1190       PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
1191       PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
1192       PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
1193       PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
1194       PetscCall(PetscSFView(parentSF, NULL));
1195     }
1196     PetscCall(DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs));
1197     PetscCall(PetscSectionDestroy(&newParentSection));
1198     PetscCall(PetscFree2(newParents,newChildIDs));
1199     PetscCall(PetscSFDestroy(&parentSF));
1200   }
1201   pmesh->useAnchors = mesh->useAnchors;
1202   PetscFunctionReturn(0);
1203 }
1204 
/* Build the point SF of dmParallel from migrationSF by a bid/claim protocol:
   every receiver bids for each point it receives, ownership is resolved with an
   MPI_MAXLOC reduction (highest bidding rank wins), and points owned by another
   rank become ghost leaves of the parallel mesh's point SF.
   Marked PETSC_UNUSED: DMPlexCreatePointSF() below is the current mechanism. */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt            rank, size;
  MPI_Comm               comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL));
    PetscCall(PetscMalloc2(numRoots,&rowners,numLeaves,&lowners));
    /* Initialize root ownership to "unclaimed" and push that to the leaves */
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    /* Resolve ownership at the roots, then broadcast the winners back to all leaves */
    PetscCall(PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    /* Leaves whose winning owner is another rank become ghost points here */
    for (p = 0; p < numLeaves; ++p) {
      PetscCheckFalse(lowners[p].rank < 0 || lowners[p].index < 0,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    PetscCall(PetscMalloc1(numGhostPoints, &ghostPoints));
    PetscCall(PetscMalloc1(numGhostPoints, &remotePoints));
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    PetscCall(PetscFree2(rowners,lowners));
    /* ghostPoints/remotePoints ownership passes to the SF */
    PetscCall(PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER));
    PetscCall(PetscSFSetFromOptions((dmParallel)->sf));
  }
  {
    /* Copy the adjacency configuration onto the parallel mesh */
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0));
  PetscFunctionReturn(0);
}
1280 
1281 /*@
1282   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1283 
1284   Input Parameters:
1285 + dm - The DMPlex object
1286 - flg - Balance the partition?
1287 
1288   Level: intermediate
1289 
1290 .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1291 @*/
1292 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1293 {
1294   DM_Plex *mesh = (DM_Plex *)dm->data;
1295 
1296   PetscFunctionBegin;
1297   mesh->partitionBalance = flg;
1298   PetscFunctionReturn(0);
1299 }
1300 
1301 /*@
1302   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1303 
1304   Input Parameter:
1305 . dm - The DMPlex object
1306 
1307   Output Parameter:
1308 . flg - Balance the partition?
1309 
1310   Level: intermediate
1311 
1312 .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1313 @*/
1314 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1315 {
1316   DM_Plex *mesh = (DM_Plex *)dm->data;
1317 
1318   PetscFunctionBegin;
1319   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1320   PetscValidBoolPointer(flg, 2);
1321   *flg = mesh->partitionBalance;
1322   PetscFunctionReturn(0);
1323 }
1324 
1325 typedef struct {
1326   PetscInt vote, rank, index;
1327 } Petsc3Int;
1328 
1329 /* MaxLoc, but carry a third piece of information around */
1330 static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1331 {
1332   Petsc3Int *a = (Petsc3Int *)inout_;
1333   Petsc3Int *b = (Petsc3Int *)in_;
1334   PetscInt i, len = *len_;
1335   for (i = 0; i < len; i++) {
1336     if (a[i].vote < b[i].vote) {
1337       a[i].vote = b[i].vote;
1338       a[i].rank = b[i].rank;
1339       a[i].index = b[i].index;
1340     } else if (a[i].vote <= b[i].vote) {
1341       if (a[i].rank >= b[i].rank) {
1342         a[i].rank = b[i].rank;
1343         a[i].index = b[i].index;
1344       }
1345     }
1346   }
1347 }
1348 
/*@C
  DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

  Collective on dm

  Input Parameters:
+ dm          - The source DMPlex object
. migrationSF - The star forest that describes the parallel point remapping
- ownership   - Flag causing a vote to determine point ownership

  Output Parameter:
. pointSF     - The star forest describing the point overlap in the remapped DM

  Notes:
  Output pointSF is guaranteed to have the array of local indices (leaves) sorted.

  If ownership is PETSC_FALSE, the sending rank is assumed to own every point, and no
  vote takes place.

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank, size;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  Vec                shifts;
  const PetscInt     numShifts = 13759;          /* size of the random shift table used for balancing */
  const PetscScalar *shift = NULL;               /* non-NULL only when balancing is active */
  const PetscBool    shiftDebug = PETSC_FALSE;
  PetscBool          balance;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0));

  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
  PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
  if (ownership) {
    MPI_Op       op;
    MPI_Datatype datatype;
    Petsc3Int   *rootVote = NULL, *leafVote = NULL;
    /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
    if (balance) {
      PetscRandom r;

      PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
      PetscCall(PetscRandomSetInterval(r, 0, 2467*size));
      PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
      PetscCall(VecSetSizes(shifts, numShifts, numShifts));
      PetscCall(VecSetType(shifts, VECSTANDARD));
      PetscCall(VecSetRandom(shifts, r));
      PetscCall(PetscRandomDestroy(&r));
      PetscCall(VecGetArrayRead(shifts, &shift));
    }

    PetscCall(PetscMalloc1(nroots, &rootVote));
    PetscCall(PetscMalloc1(nleaves, &leafVote));
    /* Point ownership vote: Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      if (shiftDebug) {
        PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size));
      }
      /* Either put in a bid or we know we own it */
      leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
      leafVote[p].rank = rank;
      leafVote[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootVote[p].vote  = -3;
      rootVote[p].rank  = -3;
      rootVote[p].index = -3;
    }
    /* Reduce the votes at the roots with the custom MaxLocCarry operation */
    PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
    PetscCallMPI(MPI_Type_commit(&datatype));
    PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
    PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
    PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
    PetscCallMPI(MPI_Op_free(&op));
    PetscCallMPI(MPI_Type_free(&datatype));
    /* Keep only the winning (rank, index) pair; the vote itself is discarded */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].rank = rootVote[p].rank;
      rootNodes[p].index = rootVote[p].index;
    }
    PetscCall(PetscFree(leafVote));
    PetscCall(PetscFree(rootVote));
  } else {
    /* No vote: the sending rank owns every point it sends */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank = rank;
    }
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  /* Tell every receiver who owns each of its points */
  PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));

  /* Points owned by another rank become leaves of the point SF */
  for (npointLeaves = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) npointLeaves++;
  }
  PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
  PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
      pointLocal[idx] = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  if (shift) {
    PetscCall(VecRestoreArrayRead(shifts, &shift));
    PetscCall(VecDestroy(&shifts));
  }
  if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT));
  PetscCall(PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF));
  PetscCall(PetscSFSetFromOptions(*pointSF));
  /* pointLocal/pointRemote ownership passes to the SF */
  PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
  PetscCall(PetscFree2(rootNodes, leafNodes));
  PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0));
  PetscFunctionReturn(0);
}
1479 
1480 /*@C
1481   DMPlexMigrate  - Migrates internal DM data over the supplied star forest
1482 
1483   Collective on dm
1484 
1485   Input Parameters:
1486 + dm       - The source DMPlex object
1487 - sf       - The star forest communication context describing the migration pattern
1488 
1489   Output Parameter:
1490 . targetDM - The target DMPlex object
1491 
1492   Level: intermediate
1493 
1494 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1495 @*/
1496 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1497 {
1498   MPI_Comm               comm;
1499   PetscInt               dim, cdim, nroots;
1500   PetscSF                sfPoint;
1501   ISLocalToGlobalMapping ltogMigration;
1502   ISLocalToGlobalMapping ltogOriginal = NULL;
1503   PetscBool              flg;
1504 
1505   PetscFunctionBegin;
1506   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1507   PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1508   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
1509   PetscCall(DMGetDimension(dm, &dim));
1510   PetscCall(DMSetDimension(targetDM, dim));
1511   PetscCall(DMGetCoordinateDim(dm, &cdim));
1512   PetscCall(DMSetCoordinateDim(targetDM, cdim));
1513 
1514   /* Check for a one-to-all distribution pattern */
1515   PetscCall(DMGetPointSF(dm, &sfPoint));
1516   PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1517   if (nroots >= 0) {
1518     IS        isOriginal;
1519     PetscInt  n, size, nleaves;
1520     PetscInt  *numbering_orig, *numbering_new;
1521 
1522     /* Get the original point numbering */
1523     PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1524     PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1525     PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1526     PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1527     /* Convert to positive global numbers */
1528     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1529     /* Derive the new local-to-global mapping from the old one */
1530     PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1531     PetscCall(PetscMalloc1(nleaves, &numbering_new));
1532     PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1533     PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1534     PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1535     PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1536     PetscCall(ISDestroy(&isOriginal));
1537   } else {
1538     /* One-to-all distribution pattern: We can derive LToG from SF */
1539     PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1540   }
1541   PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
1542   if (flg) {
1543     PetscCall(PetscPrintf(comm, "Point renumbering for DM migration:\n"));
1544     PetscCall(ISLocalToGlobalMappingView(ltogMigration, NULL));
1545   }
1546   /* Migrate DM data to target DM */
1547   PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1548   PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1549   PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1550   PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1551   PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1552   PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1553   PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1554   PetscFunctionReturn(0);
1555 }
1556 
1557 /*@C
1558   DMPlexDistribute - Distributes the mesh and any associated sections.
1559 
1560   Collective on dm
1561 
1562   Input Parameters:
1563 + dm  - The original DMPlex object
1564 - overlap - The overlap of partitions, 0 is the default
1565 
1566   Output Parameters:
1567 + sf - The PetscSF used for point distribution, or NULL if not needed
1568 - dmParallel - The distributed DMPlex object
1569 
1570   Note: If the mesh was not distributed, the output dmParallel will be NULL.
1571 
1572   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1573   representation on the mesh.
1574 
1575   Level: intermediate
1576 
1577 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1578 @*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm               comm;
  PetscPartitioner       partitioner;
  IS                     cellPart;
  PetscSection           cellPartSection;
  DM                     dmCoord;
  DMLabel                lblPartition, lblMigration;
  PetscSF                sfMigration, sfStratified, sfPoint;
  PetscBool              flg, balance;
  PetscMPIInt            rank, size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf,3);
  PetscValidPointer(dmParallel,4);

  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* Serial communicator: nothing to distribute, outputs stay NULL */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0));
  /* Create cell partition */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &cellPartSection));
  PetscCall(DMPlexGetPartitioner(dm, &partitioner));
  PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
  PetscCall(PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0));
  {
    /* Convert partition to DMLabel: stratum value = target rank, points = closure of its cells */
    IS             is;
    PetscHSetI     ht;
    const PetscInt *points;
    PetscInt       *iranks;
    PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
    /* Preallocate strata: collect the set of target ranks that receive at least one cell */
    PetscCall(PetscHSetICreate(&ht));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
    }
    PetscCall(PetscHSetIGetSize(ht, &nranks));
    PetscCall(PetscMalloc1(nranks, &iranks));
    PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
    PetscCall(PetscHSetIDestroy(&ht));
    PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
    PetscCall(PetscFree(iranks));
    /* Inline DMPlexPartitionLabelClosure() */
    PetscCall(ISGetIndices(cellPart, &points));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (!npoints) continue;
      PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
      /* Each rank's stratum is the closure of the cells assigned to it */
      PetscCall(DMPlexClosurePoints_Private(dm, npoints, points+poff, &is));
      PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
      PetscCall(ISDestroy(&is));
    }
    PetscCall(ISRestoreIndices(cellPart, &points));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0));

  /* Invert the partition label (who gets what -> who I receive from) and build the migration SF */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
  PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
  /* Stratify so migrated points keep cells/faces/edges/vertices contiguous by depth */
  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified;
  PetscCall(PetscSFSetUp(sfMigration));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
  if (flg) {
    PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  PetscCall(DMPlexCreate(comm, dmParallel));
  PetscCall(PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));

  /* Build the point SF without overlap */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
  PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(*dmParallel, sfPoint));
  /* Coordinate DM shares the same point SF as the mesh */
  PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  if (flg) PetscCall(PetscSFView(sfPoint, NULL));

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves, noldleaves, l;
    const PetscInt    *oldLeaves;
    PetscSFNode       *newRemote, *permRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM */
    PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
    PetscCall(DMDestroy(dmParallel));
    *dmParallel = dmOverlap;
    if (flg) {
      PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
      PetscCall(PetscSFView(sfOverlap, NULL));
    }

    /* Re-map the migration SF to establish the full migration pattern */
    PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
    PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
    PetscCall(PetscMalloc1(nleaves, &newRemote));
    /* oldRemote: original root point mapping to original leaf point
       newRemote: original leaf point mapping to overlapped leaf point */
    if (oldLeaves) {
      /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
      PetscCall(PetscMalloc1(noldleaves, &permRemote));
      for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
      oldRemote = permRemote;
    }
    PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    /* oldRemote now points at permRemote (allocated above), so free it */
    if (oldLeaves) PetscCall(PetscFree(oldRemote));
    PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
    PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
    PetscCall(PetscSFDestroy(&sfOverlap));
    PetscCall(PetscSFDestroy(&sfMigration));
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  PetscCall(DMLabelDestroy(&lblPartition));
  PetscCall(DMLabelDestroy(&lblMigration));
  PetscCall(PetscSectionDestroy(&cellPartSection));
  PetscCall(ISDestroy(&cellPart));
  /* Copy discretization */
  PetscCall(DMCopyDisc(dm, *dmParallel));
  /* Create sfNatural: mapping between the distributed and the original ("natural") ordering */
  if (dm->useNatural) {
    PetscSection section;

    PetscCall(DMGetLocalSection(dm, &section));
    PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
    PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
  }
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, *dmParallel));
  /* Cleanup: hand the migration SF to the caller if requested, otherwise destroy it */
  if (sf) {*sf = sfMigration;}
  else    PetscCall(PetscSFDestroy(&sfMigration));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscCall(PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0));
  PetscFunctionReturn(0);
}
1737 
1738 /*@C
1739   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1740 
1741   Collective on dm
1742 
1743   Input Parameters:
1744 + dm  - The non-overlapping distributed DMPlex object
1745 - overlap - The overlap of partitions (the same on all ranks)
1746 
1747   Output Parameters:
1748 + sf - The PetscSF used for point distribution
1749 - dmOverlap - The overlapping distributed DMPlex object, or NULL
1750 
1751   Notes:
1752   If the mesh was not distributed, the return value is NULL.
1753 
1754   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1755   representation on the mesh.
1756 
1757   Level: advanced
1758 
1759 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1760 @*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm               comm;
  PetscMPIInt            size, rank;
  PetscSection           rootSection, leafSection;
  IS                     rootrank, leafrank;
  DM                     dmCoord;
  DMLabel                lblOverlap;
  PetscSF                sfOverlap, sfStratified, sfPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  if (sf) *sf = NULL;
  *dmOverlap  = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Serial communicator: no partitions, hence no overlap to add */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  /* Compute point overlap with neighbouring processes on the distributed DM */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  /* Determine ownership of shared points (root/leaf ranks) before widening */
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  /* Convert overlap label to stratified migration SF */
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
  PetscCall(PetscSFDestroy(&sfOverlap));
  sfOverlap = sfStratified;
  PetscCall(PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF"));
  PetscCall(PetscSFSetFromOptions(sfOverlap));

  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));

  /* Build the overlapping DM */
  PetscCall(DMPlexCreate(comm, dmOverlap));
  PetscCall(PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
  /* Store the overlap in the new DM: accumulates with any overlap the input already had */
  ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
  /* Build the new point SF */
  PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
  /* Coordinate DM shares the same point SF as the mesh */
  PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* Cleanup overlap partition */
  PetscCall(DMLabelDestroy(&lblOverlap));
  /* Hand the overlap migration SF to the caller if requested, otherwise destroy it */
  if (sf) *sf = sfOverlap;
  else    PetscCall(PetscSFDestroy(&sfOverlap));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1824 
1825 PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1826 {
1827   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1828 
1829   PetscFunctionBegin;
1830   *overlap = mesh->overlap;
1831   PetscFunctionReturn(0);
1832 }
1833 
1834 /*@
1835   DMPlexGetOverlap - Get the DMPlex partition overlap.
1836 
1837   Not collective
1838 
1839   Input Parameter:
1840 . dm - The DM
1841 
1842   Output Parameter:
1843 . overlap - The overlap of this DM
1844 
1845   Level: intermediate
1846 
1847 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1848 @*/
PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Dispatch to the implementation composed under "DMPlexGetOverlap_C" (DMPlexGetOverlap_Plex) */
  PetscCall(PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap)));
  PetscFunctionReturn(0);
}
1856 
1857 PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
1858 {
1859   DM_Plex *mesh = (DM_Plex *) dm->data;
1860 
1861   PetscFunctionBegin;
1862   mesh->distDefault = dist;
1863   PetscFunctionReturn(0);
1864 }
1865 
1866 /*@
1867   DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default
1868 
1869   Logically collective
1870 
1871   Input Parameters:
1872 + dm   - The DM
1873 - dist - Flag for distribution
1874 
1875   Level: intermediate
1876 
.seealso: DMPlexDistributeGetDefault(), DMPlexDistribute()
1878 @*/
PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveBool(dm, dist, 2);
  /* Dispatch to the implementation composed under "DMPlexDistributeSetDefault_C", if any */
  PetscCall(PetscTryMethod(dm,"DMPlexDistributeSetDefault_C",(DM,PetscBool),(dm,dist)));
  PetscFunctionReturn(0);
}
1887 
1888 PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
1889 {
1890   DM_Plex *mesh = (DM_Plex *) dm->data;
1891 
1892   PetscFunctionBegin;
1893   *dist = mesh->distDefault;
1894   PetscFunctionReturn(0);
1895 }
1896 
1897 /*@
1898   DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default
1899 
1900   Not collective
1901 
1902   Input Parameter:
1903 . dm   - The DM
1904 
1905   Output Parameter:
1906 . dist - Flag for distribution
1907 
1908   Level: intermediate
1909 
.seealso: DMPlexDistributeSetDefault(), DMPlexDistribute()
1911 @*/
PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(dist, 2);
  /* Dispatch to the implementation composed under "DMPlexDistributeGetDefault_C" */
  PetscCall(PetscUseMethod(dm,"DMPlexDistributeGetDefault_C",(DM,PetscBool*),(dm,dist)));
  PetscFunctionReturn(0);
}
1920 
1921 /*@C
1922   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1923   root process of the original's communicator.
1924 
1925   Collective on dm
1926 
1927   Input Parameter:
1928 . dm - the original DMPlex object
1929 
1930   Output Parameters:
1931 + sf - the PetscSF used for point distribution (optional)
1932 - gatherMesh - the gathered DM object, or NULL
1933 
1934   Level: intermediate
1935 
1936 .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1937 @*/
PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
{
  MPI_Comm       comm;
  PetscMPIInt    size;
  PetscPartitioner oldPart, gatherPart;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(gatherMesh,3);
  *gatherMesh = NULL;
  if (sf) *sf = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm,&size));
  /* Serial communicator: everything is already on one process; outputs stay NULL */
  if (size == 1) PetscFunctionReturn(0);
  /* Temporarily swap in a GATHER partitioner (all cells to rank 0) and distribute */
  PetscCall(DMPlexGetPartitioner(dm,&oldPart));
  PetscCall(PetscObjectReference((PetscObject)oldPart));
  PetscCall(PetscPartitionerCreate(comm,&gatherPart));
  PetscCall(PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER));
  PetscCall(DMPlexSetPartitioner(dm,gatherPart));
  PetscCall(DMPlexDistribute(dm,0,sf,gatherMesh));

  /* Restore the original partitioner on dm (reference taken above keeps it alive).
     NOTE(review): if DMPlexDistribute() errors, PetscCall returns before this restore,
     leaving the gather partitioner set on dm — confirm this is acceptable on error paths. */
  PetscCall(DMPlexSetPartitioner(dm,oldPart));
  PetscCall(PetscPartitionerDestroy(&gatherPart));
  PetscCall(PetscPartitionerDestroy(&oldPart));
  PetscFunctionReturn(0);
}
1964 
1965 /*@C
1966   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1967 
1968   Collective on dm
1969 
1970   Input Parameter:
1971 . dm - the original DMPlex object
1972 
1973   Output Parameters:
1974 + sf - the PetscSF used for point distribution (optional)
1975 - redundantMesh - the redundant DM object, or NULL
1976 
1977   Level: intermediate
1978 
1979 .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1980 @*/
1981 PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
1982 {
1983   MPI_Comm       comm;
1984   PetscMPIInt    size, rank;
1985   PetscInt       pStart, pEnd, p;
1986   PetscInt       numPoints = -1;
1987   PetscSF        migrationSF, sfPoint, gatherSF;
1988   DM             gatherDM, dmCoord;
1989   PetscSFNode    *points;
1990 
1991   PetscFunctionBegin;
1992   PetscValidHeaderSpecific(dm,DM_CLASSID,1);
1993   PetscValidPointer(redundantMesh,3);
1994   *redundantMesh = NULL;
1995   comm = PetscObjectComm((PetscObject)dm);
1996   PetscCallMPI(MPI_Comm_size(comm,&size));
1997   if (size == 1) {
1998     PetscCall(PetscObjectReference((PetscObject) dm));
1999     *redundantMesh = dm;
2000     if (sf) *sf = NULL;
2001     PetscFunctionReturn(0);
2002   }
2003   PetscCall(DMPlexGetGatherDM(dm,&gatherSF,&gatherDM));
2004   if (!gatherDM) PetscFunctionReturn(0);
2005   PetscCallMPI(MPI_Comm_rank(comm,&rank));
2006   PetscCall(DMPlexGetChart(gatherDM,&pStart,&pEnd));
2007   numPoints = pEnd - pStart;
2008   PetscCallMPI(MPI_Bcast(&numPoints,1,MPIU_INT,0,comm));
2009   PetscCall(PetscMalloc1(numPoints,&points));
2010   PetscCall(PetscSFCreate(comm,&migrationSF));
2011   for (p = 0; p < numPoints; p++) {
2012     points[p].index = p;
2013     points[p].rank  = 0;
2014   }
2015   PetscCall(PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER));
2016   PetscCall(DMPlexCreate(comm, redundantMesh));
2017   PetscCall(PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh"));
2018   PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2019   PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2020   PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2021   PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2022   if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2023   PetscCall(PetscSFDestroy(&sfPoint));
2024   if (sf) {
2025     PetscSF tsf;
2026 
2027     PetscCall(PetscSFCompose(gatherSF,migrationSF,&tsf));
2028     PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2029     PetscCall(PetscSFDestroy(&tsf));
2030   }
2031   PetscCall(PetscSFDestroy(&migrationSF));
2032   PetscCall(PetscSFDestroy(&gatherSF));
2033   PetscCall(DMDestroy(&gatherDM));
2034   PetscCall(DMCopyDisc(dm, *redundantMesh));
2035   PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, *redundantMesh));
2036   PetscFunctionReturn(0);
2037 }
2038 
2039 /*@
2040   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.
2041 
2042   Collective
2043 
2044   Input Parameter:
2045 . dm      - The DM object
2046 
2047   Output Parameter:
2048 . distributed - Flag whether the DM is distributed
2049 
2050   Level: intermediate
2051 
2052   Notes:
2053   This currently finds out whether at least two ranks have any DAG points.
2054   This involves MPI_Allreduce() with one integer.
2055   The result is currently not stashed so every call to this routine involves this global communication.
2056 
2057 .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2058 @*/
2059 PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2060 {
2061   PetscInt          pStart, pEnd, count;
2062   MPI_Comm          comm;
2063   PetscMPIInt       size;
2064 
2065   PetscFunctionBegin;
2066   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2067   PetscValidBoolPointer(distributed,2);
2068   PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
2069   PetscCallMPI(MPI_Comm_size(comm,&size));
2070   if (size == 1) { *distributed = PETSC_FALSE; PetscFunctionReturn(0); }
2071   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2072   count = (pEnd - pStart) > 0 ? 1 : 0;
2073   PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2074   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2075   PetscFunctionReturn(0);
2076 }
2077