xref: /petsc/src/dm/impls/plex/plexdistribute.c (revision 64eb36536758ba5fde9eda0d4d7f84c5f43dcef0)
1 #include <petsc/private/dmpleximpl.h>    /*I      "petscdmplex.h"   I*/
2 #include <petsc/private/dmlabelimpl.h>   /*I      "petscdmlabel.h"  I*/
3 
4 /*@C
5   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
6 
7   Input Parameters:
8 + dm      - The DM object
9 . user    - The user callback, may be NULL (to clear the callback)
10 - ctx     - context for callback evaluation, may be NULL
11 
12   Level: advanced
13 
14   Notes:
15      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
16 
17      Any setting here overrides other configuration of DMPlex adjacency determination.
18 
19 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
20 @*/
21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
22 {
23   DM_Plex *mesh = (DM_Plex *)dm->data;
24 
25   PetscFunctionBegin;
26   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
27   mesh->useradjacency = user;
28   mesh->useradjacencyctx = ctx;
29   PetscFunctionReturn(0);
30 }
31 
32 /*@C
33   DMPlexGetAdjacencyUser - get the user-defined adjacency callback
34 
35   Input Parameter:
36 . dm      - The DM object
37 
38   Output Parameters:
39 + user    - The user callback
40 - ctx     - context for callback evaluation
41 
42   Level: advanced
43 
44 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
45 @*/
46 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
47 {
48   DM_Plex *mesh = (DM_Plex *)dm->data;
49 
50   PetscFunctionBegin;
51   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
52   if (user) *user = mesh->useradjacency;
53   if (ctx) *ctx = mesh->useradjacencyctx;
54   PetscFunctionReturn(0);
55 }
56 
57 /*@
58   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
59 
60   Input Parameters:
61 + dm      - The DM object
62 - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
63 
64   Level: intermediate
65 
66 .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
67 @*/
68 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
69 {
70   DM_Plex *mesh = (DM_Plex *) dm->data;
71 
72   PetscFunctionBegin;
73   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
74   mesh->useAnchors = useAnchors;
75   PetscFunctionReturn(0);
76 }
77 
78 /*@
79   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
80 
81   Input Parameter:
82 . dm      - The DM object
83 
84   Output Parameter:
85 . useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
86 
87   Level: intermediate
88 
89 .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
90 @*/
91 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
92 {
93   DM_Plex *mesh = (DM_Plex *) dm->data;
94 
95   PetscFunctionBegin;
96   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
97   PetscValidBoolPointer(useAnchors, 2);
98   *useAnchors = mesh->useAnchors;
99   PetscFunctionReturn(0);
100 }
101 
102 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
103 {
104   const PetscInt *cone = NULL;
105   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
106 
107   PetscFunctionBeginHot;
108   PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
109   PetscCall(DMPlexGetCone(dm, p, &cone));
110   for (c = 0; c <= coneSize; ++c) {
111     const PetscInt  point   = !c ? p : cone[c-1];
112     const PetscInt *support = NULL;
113     PetscInt        supportSize, s, q;
114 
115     PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
116     PetscCall(DMPlexGetSupport(dm, point, &support));
117     for (s = 0; s < supportSize; ++s) {
118       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
119         if (support[s] == adj[q]) break;
120       }
121       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
122     }
123   }
124   *adjSize = numAdj;
125   PetscFunctionReturn(0);
126 }
127 
128 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
129 {
130   const PetscInt *support = NULL;
131   PetscInt        numAdj   = 0, maxAdjSize = *adjSize, supportSize, s;
132 
133   PetscFunctionBeginHot;
134   PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
135   PetscCall(DMPlexGetSupport(dm, p, &support));
136   for (s = 0; s <= supportSize; ++s) {
137     const PetscInt  point = !s ? p : support[s-1];
138     const PetscInt *cone  = NULL;
139     PetscInt        coneSize, c, q;
140 
141     PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
142     PetscCall(DMPlexGetCone(dm, point, &cone));
143     for (c = 0; c < coneSize; ++c) {
144       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
145         if (cone[c] == adj[q]) break;
146       }
147       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
148     }
149   }
150   *adjSize = numAdj;
151   PetscFunctionReturn(0);
152 }
153 
154 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
155 {
156   PetscInt      *star = NULL;
157   PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
158 
159   PetscFunctionBeginHot;
160   PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
161   for (s = 0; s < starSize*2; s += 2) {
162     const PetscInt *closure = NULL;
163     PetscInt        closureSize, c, q;
164 
165     PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
166     for (c = 0; c < closureSize*2; c += 2) {
167       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
168         if (closure[c] == adj[q]) break;
169       }
170       PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
171     }
172     PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
173   }
174   PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
175   *adjSize = numAdj;
176   PetscFunctionReturn(0);
177 }
178 
/* Compute the adjacency of point p, dispatching on the requested adjacency variant
   (user callback, transitive closure, cone, or support), optionally replacing
   constrained (anchored) points by their anchors.
   In/out: *adjSize is the capacity of *adj (or negative for "use the computed bound");
   *adj is the adjacency array, allocated here when passed in as NULL (caller frees). */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): static, so the bound computed by the last allocating call persists
     process-wide.  If *adj is passed in non-NULL with *adjSize < 0, a stale asiz from a
     previous call (possibly for a different DM) is used -- confirm callers always pair
     these.  Also not thread-safe. */
  static PetscInt asiz = 0;
  PetscInt maxAnchors = 1;
  PetscInt aStart = -1, aEnd = -1;
  PetscInt maxAdjSize;
  PetscSection aSec = NULL;
  IS aIS = NULL;
  const PetscInt *anchors;
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm,&aSec,&aIS));
    if (aSec) {
      /* maxAnchors bounds how many points one constrained point can expand into */
      PetscCall(PetscSectionGetMaxDof(aSec,&maxAnchors));
      maxAnchors = PetscMax(1,maxAnchors);
      PetscCall(PetscSectionGetChart(aSec,&aStart,&aEnd));
      PetscCall(ISGetIndices(aIS,&anchors));
    }
  }
  if (!*adj) {
    /* Allocate a worst-case adjacency buffer: geometric-series bound on the number of
       points reachable through depth levels of cones/supports, capped by the chart size */
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    PetscCall(DMPlexGetChart(dm, &pStart,&pEnd));
    PetscCall(DMPlexGetDepth(dm, &depth));
    depth = PetscMax(depth, -depth);
    PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz  = PetscMin(asiz,pEnd-pStart);
    PetscCall(PetscMalloc1(asiz,adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* Dispatch: a user callback overrides everything else */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    /* Replace each constrained point in the adjacency by its anchor points:
       compact the array over the removed point, then append its (deduplicated) anchors */
    PetscInt origSize = *adjSize;
    PetscInt numAdj = origSize;
    PetscInt i = 0, j;
    PetscInt *orig = *adj;

    while (i < origSize) {
      PetscInt p = orig[i];
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        PetscCall(PetscSectionGetDof(aSec,p,&aDof));
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Shift the tail left to drop the constrained point; do not advance i,
           since a new point now occupies slot i */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec,p,&aOff));
        for (s = 0; s < aDof; ++s) {
          /* Append anchor if not already present (the comma expression in the loop
             condition performs the append when q reaches numAdj).
             NOTE(review): the append happens before the bound check below. */
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
        }
      }
      else {
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS,&anchors));
  }
  PetscFunctionReturn(0);
}
264 
265 /*@
266   DMPlexGetAdjacency - Return all points adjacent to the given point
267 
268   Input Parameters:
269 + dm - The DM object
270 - p  - The point
271 
272   Input/Output Parameters:
273 + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
274             on output the number of adjacent points
275 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
276         on output contains the adjacent points
277 
278   Level: advanced
279 
280   Notes:
281     The user must PetscFree the adj array if it was not passed in.
282 
283 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
284 @*/
285 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
286 {
287   PetscBool      useCone, useClosure, useAnchors;
288 
289   PetscFunctionBeginHot;
290   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
291   PetscValidIntPointer(adjSize,3);
292   PetscValidPointer(adj,4);
293   PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
294   PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
295   PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
296   PetscFunctionReturn(0);
297 }
298 
/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on dm

  Input Parameters:
+ dm      - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - Section giving the number of leaf edges into each root point (as produced by DMPlexDistributeOwnership())
. rootRanks - The rank of each leaf edge into a root point
. leafRankSection - Section giving the number of processes sharing each leaf point
- leafRanks - The rank of each process sharing a leaf point

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess    - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: PetscSFCreate(), DMPlexCreateProcessSF()
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors;   /* bitmask over ranks: set bit => neighbor process */
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) {PetscValidPointer(processRanks, 7);}
  if (sfProcess)    {PetscValidPointer(sfProcess, 8);}
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
  PetscCall(PetscBTCreate(size, &neighbors));
  PetscCall(PetscBTMemzero(size, neighbors));
  /* Compute root-to-leaf process connectivity */
  PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(rootRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(rootRanks, &nranks));
  /* Compute leaf-to-neighbor process connectivity */
  PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(leafRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(leafRanks, &nranks));
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges: a process is never its own neighbor */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
  PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
  PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
  /* One SF edge per neighbor: leaf "proc" on this rank points at root "rank" on proc */
  for (proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  PetscCall(PetscBTDestroy(&neighbors));
  /* ranksNew ownership passes to the IS when requested; otherwise free it here */
  if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
  else              PetscCall(PetscFree(ranksNew));
  if (sfProcess) {
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
    PetscCall(PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF"));
    PetscCall(PetscSFSetFromOptions(*sfProcess));
    PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
  }
  PetscFunctionReturn(0);
}
391 
392 /*@
393   DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.
394 
395   Collective on dm
396 
397   Input Parameter:
398 . dm - The DM
399 
400   Output Parameters:
401 + rootSection - The number of leaves for a given root point
402 . rootrank    - The rank of each edge into the root point
403 . leafSection - The number of processes sharing a given leaf point
404 - leafrank    - The rank of each process sharing a leaf point
405 
406   Level: developer
407 
408 .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap()
409 @*/
410 PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
411 {
412   MPI_Comm        comm;
413   PetscSF         sfPoint;
414   const PetscInt *rootdegree;
415   PetscInt       *myrank, *remoterank;
416   PetscInt        pStart, pEnd, p, nedges;
417   PetscMPIInt     rank;
418 
419   PetscFunctionBegin;
420   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
421   PetscCallMPI(MPI_Comm_rank(comm, &rank));
422   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
423   PetscCall(DMGetPointSF(dm, &sfPoint));
424   /* Compute number of leaves for each root */
425   PetscCall(PetscObjectSetName((PetscObject) rootSection, "Root Section"));
426   PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
427   PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
428   PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
429   for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]));
430   PetscCall(PetscSectionSetUp(rootSection));
431   /* Gather rank of each leaf to root */
432   PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
433   PetscCall(PetscMalloc1(pEnd-pStart, &myrank));
434   PetscCall(PetscMalloc1(nedges,  &remoterank));
435   for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
436   PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
437   PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
438   PetscCall(PetscFree(myrank));
439   PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
440   /* Distribute remote ranks to leaves */
441   PetscCall(PetscObjectSetName((PetscObject) leafSection, "Leaf Section"));
442   PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
443   PetscFunctionReturn(0);
444 }
445 
446 /*@C
447   DMPlexCreateOverlapLabel - Compute owner information for shared points. This basically gets two-sided for an SF.
448 
449   Collective on dm
450 
451   Input Parameters:
452 + dm          - The DM
453 . levels      - Number of overlap levels
454 . rootSection - The number of leaves for a given root point
455 . rootrank    - The rank of each edge into the root point
456 . leafSection - The number of processes sharing a given leaf point
457 - leafrank    - The rank of each process sharing a leaf point
458 
459   Output Parameter:
460 . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
461 
462   Level: developer
463 
464 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
465 @*/
466 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
467 {
468   MPI_Comm           comm;
469   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
470   PetscSF            sfPoint;
471   const PetscSFNode *remote;
472   const PetscInt    *local;
473   const PetscInt    *nrank, *rrank;
474   PetscInt          *adj = NULL;
475   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
476   PetscMPIInt        rank, size;
477   PetscBool          flg;
478 
479   PetscFunctionBegin;
480   *ovLabel = NULL;
481   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
482   PetscCallMPI(MPI_Comm_size(comm, &size));
483   PetscCallMPI(MPI_Comm_rank(comm, &rank));
484   if (size == 1) PetscFunctionReturn(0);
485   PetscCall(DMGetPointSF(dm, &sfPoint));
486   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
487   if (!levels) {
488     /* Add owned points */
489     PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
490     for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
491     PetscFunctionReturn(0);
492   }
493   PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
494   PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
495   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
496   /* Handle leaves: shared with the root point */
497   for (l = 0; l < nleaves; ++l) {
498     PetscInt adjSize = PETSC_DETERMINE, a;
499 
500     PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
501     for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
502   }
503   PetscCall(ISGetIndices(rootrank, &rrank));
504   PetscCall(ISGetIndices(leafrank, &nrank));
505   /* Handle roots */
506   for (p = pStart; p < pEnd; ++p) {
507     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
508 
509     if ((p >= sStart) && (p < sEnd)) {
510       /* Some leaves share a root with other leaves on different processes */
511       PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
512       if (neighbors) {
513         PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
514         PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
515         for (n = 0; n < neighbors; ++n) {
516           const PetscInt remoteRank = nrank[noff+n];
517 
518           if (remoteRank == rank) continue;
519           for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
520         }
521       }
522     }
523     /* Roots are shared with leaves */
524     PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
525     if (!neighbors) continue;
526     PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
527     PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
528     for (n = 0; n < neighbors; ++n) {
529       const PetscInt remoteRank = rrank[noff+n];
530 
531       if (remoteRank == rank) continue;
532       for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
533     }
534   }
535   PetscCall(PetscFree(adj));
536   PetscCall(ISRestoreIndices(rootrank, &rrank));
537   PetscCall(ISRestoreIndices(leafrank, &nrank));
538   /* Add additional overlap levels */
539   for (l = 1; l < levels; l++) {
540     /* Propagate point donations over SF to capture remote connections */
541     PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
542     /* Add next level of point donations to the label */
543     PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
544   }
545   /* We require the closure in the overlap */
546   PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
547   PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg));
548   if (flg) {
549     PetscViewer viewer;
550     PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
551     PetscCall(DMLabelView(ovAdjByRank, viewer));
552   }
553   /* Invert sender to receiver label */
554   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
555   PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
556   /* Add owned points, except for shared local points */
557   for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
558   for (l = 0; l < nleaves; ++l) {
559     PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
560     PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
561   }
562   /* Clean up */
563   PetscCall(DMLabelDestroy(&ovAdjByRank));
564   PetscFunctionReturn(0);
565 }
566 
/*@C
  DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

  Collective on dm

  Input Parameters:
+ dm          - The DM
- overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - An SF that maps original points in old locations to points in new locations

  Level: developer

.seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets */
  /* Tag every local point with its depth, then pull the depth of each incoming overlap point */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift */
  /* NOTE(review): the shift arithmetic below appears to interleave received strata in the
     order cells, vertices, faces, edges (cf. the ordering used in DMPlexStratifyMigrationSF)
     -- confirm against that function before modifying */
  PetscCall(PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx));
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    /* depthIdx[d] tracks the next free slot for received points of depth d */
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves;
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  /* Points already shared through the point SF keep pointing at their existing owners */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths,remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* The SF owns ilocal/iremote from here on */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(0);
}
676 
677 /*@
678   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.
679 
680   Input Parameters:
681 + dm          - The DM
682 - sf          - A star forest with non-ordered leaves, usually defining a DM point migration
683 
684   Output Parameter:
685 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
686 
687   Note:
688   This lexicographically sorts by (depth, cellType)
689 
690   Level: developer
691 
692 .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
693 @*/
694 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
695 {
696   MPI_Comm           comm;
697   PetscMPIInt        rank, size;
698   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
699   PetscSFNode       *pointDepths, *remoteDepths;
700   PetscInt          *ilocal;
701   PetscInt          *depthRecv, *depthShift, *depthIdx;
702   PetscInt          *ctRecv,    *ctShift,    *ctIdx;
703   const PetscSFNode *iremote;
704 
705   PetscFunctionBegin;
706   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
707   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
708   PetscCallMPI(MPI_Comm_rank(comm, &rank));
709   PetscCallMPI(MPI_Comm_size(comm, &size));
710   PetscCall(DMPlexGetDepth(dm, &ldepth));
711   PetscCall(DMGetDimension(dm, &dim));
712   PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
713   PetscCheck(!(ldepth >= 0) || !(depth != ldepth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
714   PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0));
715 
716   /* Before building the migration SF we need to know the new stratum offsets */
717   PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
718   PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
719   for (d = 0; d < depth+1; ++d) {
720     PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
721     for (p = pStart; p < pEnd; ++p) {
722       DMPolytopeType ct;
723 
724       PetscCall(DMPlexGetCellType(dm, p, &ct));
725       pointDepths[p].index = d;
726       pointDepths[p].rank  = ct;
727     }
728   }
729   for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
730   PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
731   PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
732   /* Count received points in each stratum and compute the internal strata shift */
733   PetscCall(PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
734   for (p = 0; p < nleaves; ++p) {
735     if (remoteDepths[p].rank < 0) {
736       ++depthRecv[remoteDepths[p].index];
737     } else {
738       ++ctRecv[remoteDepths[p].rank];
739     }
740   }
741   {
742     PetscInt depths[4], dims[4], shift = 0, i, c;
743 
744     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
745          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
746      */
747     depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
748     dims[0]   = dim;   dims[1]   = 0; dims[2]   = dim-1;   dims[3]   = 1;
749     for (i = 0; i <= depth; ++i) {
750       const PetscInt dep = depths[i];
751       const PetscInt dim = dims[i];
752 
753       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
754         if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
755         ctShift[c] = shift;
756         shift     += ctRecv[c];
757       }
758       depthShift[dep] = shift;
759       shift          += depthRecv[dep];
760     }
761     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
762       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);
763 
764       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
765         ctShift[c] = shift;
766         shift     += ctRecv[c];
767       }
768     }
769   }
770   /* Derive a new local permutation based on stratified indices */
771   PetscCall(PetscMalloc1(nleaves, &ilocal));
772   for (p = 0; p < nleaves; ++p) {
773     const PetscInt       dep = remoteDepths[p].index;
774     const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;
775 
776     if ((PetscInt) ct < 0) {
777       ilocal[p] = depthShift[dep] + depthIdx[dep];
778       ++depthIdx[dep];
779     } else {
780       ilocal[p] = ctShift[ct] + ctIdx[ct];
781       ++ctIdx[ct];
782     }
783   }
784   PetscCall(PetscSFCreate(comm, migrationSF));
785   PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Migration SF"));
786   PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode*)iremote, PETSC_COPY_VALUES));
787   PetscCall(PetscFree2(pointDepths,remoteDepths));
788   PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
789   PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0));
790   PetscFunctionReturn(0);
791 }
792 
793 /*@
794   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
795 
796   Collective on dm
797 
798   Input Parameters:
799 + dm - The DMPlex object
800 . pointSF - The PetscSF describing the communication pattern
801 . originalSection - The PetscSection for existing data layout
802 - originalVec - The existing data in a local vector
803 
804   Output Parameters:
805 + newSection - The PetscSF describing the new data layout
806 - newVec - The new data in a local vector
807 
808   Level: developer
809 
810 .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
811 @*/
812 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
813 {
814   PetscSF        fieldSF;
815   PetscInt      *remoteOffsets, fieldSize;
816   PetscScalar   *originalValues, *newValues;
817 
818   PetscFunctionBegin;
819   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
820   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
821 
822   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
823   PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
824   PetscCall(VecSetType(newVec,dm->vectype));
825 
826   PetscCall(VecGetArray(originalVec, &originalValues));
827   PetscCall(VecGetArray(newVec, &newValues));
828   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
829   PetscCall(PetscFree(remoteOffsets));
830   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
831   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE));
832   PetscCall(PetscSFDestroy(&fieldSF));
833   PetscCall(VecRestoreArray(newVec, &newValues));
834   PetscCall(VecRestoreArray(originalVec, &originalValues));
835   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
836   PetscFunctionReturn(0);
837 }
838 
839 /*@
840   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
841 
842   Collective on dm
843 
844   Input Parameters:
845 + dm - The DMPlex object
846 . pointSF - The PetscSF describing the communication pattern
847 . originalSection - The PetscSection for existing data layout
848 - originalIS - The existing data
849 
850   Output Parameters:
851 + newSection - The PetscSF describing the new data layout
852 - newIS - The new data
853 
854   Level: developer
855 
856 .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
857 @*/
858 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
859 {
860   PetscSF         fieldSF;
861   PetscInt       *newValues, *remoteOffsets, fieldSize;
862   const PetscInt *originalValues;
863 
864   PetscFunctionBegin;
865   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0));
866   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
867 
868   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
869   PetscCall(PetscMalloc1(fieldSize, &newValues));
870 
871   PetscCall(ISGetIndices(originalIS, &originalValues));
872   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
873   PetscCall(PetscFree(remoteOffsets));
874   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
875   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE));
876   PetscCall(PetscSFDestroy(&fieldSF));
877   PetscCall(ISRestoreIndices(originalIS, &originalValues));
878   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
879   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0));
880   PetscFunctionReturn(0);
881 }
882 
883 /*@
884   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
885 
886   Collective on dm
887 
888   Input Parameters:
889 + dm - The DMPlex object
890 . pointSF - The PetscSF describing the communication pattern
891 . originalSection - The PetscSection for existing data layout
892 . datatype - The type of data
893 - originalData - The existing data
894 
895   Output Parameters:
896 + newSection - The PetscSection describing the new data layout
897 - newData - The new data
898 
899   Level: developer
900 
901 .seealso: DMPlexDistribute(), DMPlexDistributeField()
902 @*/
903 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
904 {
905   PetscSF        fieldSF;
906   PetscInt      *remoteOffsets, fieldSize;
907   PetscMPIInt    dataSize;
908 
909   PetscFunctionBegin;
910   PetscCall(PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0));
911   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
912 
913   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
914   PetscCallMPI(MPI_Type_size(datatype, &dataSize));
915   PetscCall(PetscMalloc(fieldSize * dataSize, newData));
916 
917   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
918   PetscCall(PetscFree(remoteOffsets));
919   PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
920   PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE));
921   PetscCall(PetscSFDestroy(&fieldSF));
922   PetscCall(PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0));
923   PetscFunctionReturn(0);
924 }
925 
/* Migrate cone (connectivity) data from dm to dmParallel over migrationSF, translate the
   received cone points into the target's local numbering, then rebuild supports and strata.
   'original' (optional) maps sender-local points to global numbers; 'renumbering' maps
   global numbers back to target-local points. Collective: the SF and reduction calls must
   stay in this exact order on all ranks. */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex               *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm               comm;
  PetscSF                coneSF;
  PetscSection           originalConeSection, newConeSection;
  PetscInt              *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool              flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  PetscCall(DMSetUp(dmParallel));
  /* Communicate and renumber cones */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    PetscInt numCones;

    /* Translate sender-local cone entries to global numbers before communicating */
    PetscCall(PetscSectionGetStorageSize(originalConeSection,&numCones));
    PetscCall(PetscMalloc1(numCones,&globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    /* Cones are already in the shared numbering; alias, do not copy */
    globCones = cones;
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  if (original) {
    PetscCall(PetscFree(globCones));
  }
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* Map received global cone points into the target's local numbering in place */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    /* IS_GTOLM_MASK leaves unmapped points negative; any such point is missing from the SF */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p));}
    }
    PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Orientations travel over the same section SF; they need no renumbering */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  {
    /* Copy the adjacency configuration so the parallel mesh behaves like the source */
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(0);
}
1007 
/* Migrate local coordinates (and the coordinate section) from dm to dmParallel over
   migrationSF, then carry over periodicity and the coordinate DM discretization. */
static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm         comm;
  DM               cdm, cdmParallel;
  PetscSection     originalCoordSection, newCoordSection;
  Vec              originalCoordinates, newCoordinates;
  PetscInt         bs;
  PetscBool        isper;
  const char      *name;
  const PetscReal *maxCell, *L;
  const DMBoundaryType *bd;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
  PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
  PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
  if (originalCoordinates) {
    /* Local coordinate vectors are sequential on each rank */
    PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
    PetscCall(PetscObjectGetName((PetscObject) originalCoordinates, &name));
    PetscCall(PetscObjectSetName((PetscObject) newCoordinates, name));

    PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
    PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
    /* Preserve the block size (presumably the embedding dimension) on the new vector */
    PetscCall(VecGetBlockSize(originalCoordinates, &bs));
    PetscCall(VecSetBlockSize(newCoordinates, bs));
    PetscCall(VecDestroy(&newCoordinates));
  }
  /* Periodicity and the coordinate discretization live outside the migrated field data */
  PetscCall(DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd));
  PetscCall(DMSetPeriodicity(dmParallel, isper, maxCell, L, bd));
  PetscCall(DMGetCoordinateDM(dm, &cdm));
  PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
  PetscCall(DMCopyDisc(cdm, cdmParallel));
  PetscFunctionReturn(0);
}
1046 
/* Migrate every DMLabel from dm to dmParallel over migrationSF. The "depth" label is
   normally recreated by stratification and skipped, unless its state shows the user
   modified it, in which case it is communicated like any other label. Collective. */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* If the user has changed the depth label, communicate it instead */
  PetscCall(DMPlexGetDepth(dm, &depth));
  PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
  if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject) depthLabel, &depthState));
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  /* Any rank detecting a modified depth label forces everyone to send it */
  PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
  if (sendDepth) {
    /* Drop the automatically created depth label on the target; it will be replaced */
    PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
    PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
  }
  /* Everyone must have either the same number of labels, or none */
  PetscCall(DMGetNumLabels(dm, &numLocalLabels));
  numLabels = numLocalLabels;
  PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      PetscCall(DMGetLabelByNum(dm, l, &label));
      /* Skip "depth" because it is recreated */
      PetscCall(PetscObjectGetName((PetscObject) label, &name));
      PetscCall(PetscStrcmp(name, "depth", &isDepth));
    }
    /* Rank 0 always has labels (numLabels came from it), so isDepth is defined on the root */
    PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
    if (isDepth && !sendDepth) continue;
    PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
      /* NOTE(review): %d with PetscInt arguments may mismatch with 64-bit indices — confirm */
      PetscCheck(!(depth >= 0) || !(gdepth != depth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        PetscCall(DMLabelHasStratum(labelNew, d, &has));
        if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
      }
    }
    PetscCall(DMAddLabel(dmParallel, labelNew));
    /* Put the output flag in the new label */
    if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
    PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
    PetscCall(PetscObjectGetName((PetscObject) labelNew, &name));
    PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
    PetscCall(DMLabelDestroy(&labelNew));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscFunctionReturn(0);
}
1118 
/* Migrate the reference tree and parent/child constraint data from dm to dmParallel over
   migrationSF. Parents are translated sender-local -> global (via 'original', if given),
   communicated, then mapped back to target-local points via 'renumbering'. Collective. */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm        comm;
  DM              refTree;
  PetscSection    origParentSection, newParentSection;
  PetscInt        *origParents, *origChildIDs;
  PetscBool       flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));

  /* Set up tree */
  PetscCall(DMPlexGetReferenceTree(dm,&refTree));
  PetscCall(DMPlexSetReferenceTree(dmParallel,refTree));
  PetscCall(DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL));
  if (origParentSection) {
    PetscInt        pStart, pEnd;
    PetscInt        *newParents, *newChildIDs, *globParents;
    PetscInt        *remoteOffsetsParents, newParentSize;
    PetscSF         parentSF;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection));
    PetscCall(PetscSectionSetChart(newParentSection,pStart,pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
    PetscCall(PetscFree(remoteOffsetsParents));
    PetscCall(PetscSectionGetStorageSize(newParentSection,&newParentSize));
    PetscCall(PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs));
    if (original) {
      PetscInt numParents;

      /* Translate sender-local parent points to global numbers before communicating */
      PetscCall(PetscSectionGetStorageSize(origParentSection,&numParents));
      PetscCall(PetscMalloc1(numParents,&globParents));
      PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
    }
    else {
      /* Already in the shared numbering; alias, do not copy */
      globParents = origParents;
    }
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    if (original) {
      PetscCall(PetscFree(globParents));
    }
    /* Child IDs are reference-tree indices; they migrate unchanged */
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
    if (PetscDefined(USE_DEBUG)) {
      /* IS_GTOLM_MASK leaves unmapped points negative; report them before failing */
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p));}
      }
      PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
    PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg));
    if (flg) {
      PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
      PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
      PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscSFView(parentSF, NULL));
    }
    PetscCall(DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs));
    PetscCall(PetscSectionDestroy(&newParentSection));
    PetscCall(PetscFree2(newParents,newChildIDs));
    PetscCall(PetscSFDestroy(&parentSF));
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}
1194 
/* Build the point SF (shared-point overlap) of dmParallel from the migration SF using a
   MAXLOC ownership vote: the highest rank bidding on a shared point becomes its owner,
   everyone else records it as a ghost. Currently unused (see DMPlexCreatePointSF). */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt            rank, size;
  MPI_Comm               comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL));
    PetscCall(PetscMalloc2(numRoots,&rowners,numLeaves,&lowners));
    /* Initialize all root claims to "unclaimed" before the first broadcast */
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    /* Reduce bids to roots (highest rank wins), then broadcast the winners back to leaves */
    PetscCall(PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      PetscCheckFalse(lowners[p].rank < 0 || lowners[p].index < 0,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    PetscCall(PetscMalloc1(numGhostPoints, &ghostPoints));
    PetscCall(PetscMalloc1(numGhostPoints, &remotePoints));
    /* Points owned by another rank become ghost leaves of the new point SF */
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    PetscCall(PetscFree2(rowners,lowners));
    PetscCall(PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER));
    PetscCall(PetscSFSetFromOptions((dmParallel)->sf));
  }
  {
    /* Copy the adjacency configuration so the parallel mesh behaves like the source */
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0));
  PetscFunctionReturn(0);
}
1270 
1271 /*@
1272   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1273 
1274   Input Parameters:
1275 + dm - The DMPlex object
1276 - flg - Balance the partition?
1277 
1278   Level: intermediate
1279 
1280 .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
1281 @*/
1282 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1283 {
1284   DM_Plex *mesh = (DM_Plex *)dm->data;
1285 
1286   PetscFunctionBegin;
1287   mesh->partitionBalance = flg;
1288   PetscFunctionReturn(0);
1289 }
1290 
1291 /*@
1292   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1293 
1294   Input Parameter:
1295 . dm - The DMPlex object
1296 
1297   Output Parameter:
1298 . flg - Balance the partition?
1299 
1300   Level: intermediate
1301 
1302 .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
1303 @*/
1304 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1305 {
1306   DM_Plex *mesh = (DM_Plex *)dm->data;
1307 
1308   PetscFunctionBegin;
1309   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1310   PetscValidBoolPointer(flg, 2);
1311   *flg = mesh->partitionBalance;
1312   PetscFunctionReturn(0);
1313 }
1314 
1315 typedef struct {
1316   PetscInt vote, rank, index;
1317 } Petsc3Int;
1318 
1319 /* MaxLoc, but carry a third piece of information around */
1320 static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1321 {
1322   Petsc3Int *a = (Petsc3Int *)inout_;
1323   Petsc3Int *b = (Petsc3Int *)in_;
1324   PetscInt i, len = *len_;
1325   for (i = 0; i < len; i++) {
1326     if (a[i].vote < b[i].vote) {
1327       a[i].vote = b[i].vote;
1328       a[i].rank = b[i].rank;
1329       a[i].index = b[i].index;
1330     } else if (a[i].vote <= b[i].vote) {
1331       if (a[i].rank >= b[i].rank) {
1332         a[i].rank = b[i].rank;
1333         a[i].index = b[i].index;
1334       }
1335     }
1336   }
1337 }
1338 
/*@C
  DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

  Input Parameters:
+ dm          - The source DMPlex object
. migrationSF - The star forest that describes the parallel point remapping
- ownership   - Flag causing a vote to determine point ownership

  Output Parameter:
. pointSF     - The star forest describing the point overlap in the remapped DM

  Notes:
  Output pointSF is guaranteed to have the array of local indices (leaves) sorted.

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank, size;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  Vec                shifts;
  const PetscInt     numShifts = 13759;      /* number of precomputed random shifts (reused modulo this) */
  const PetscScalar *shift = NULL;           /* non-NULL only when balancing; guards shifts cleanup below */
  const PetscBool    shiftDebug = PETSC_FALSE;
  PetscBool          balance;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0));

  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
  PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
  if (ownership) {
    MPI_Op       op;
    MPI_Datatype datatype;
    Petsc3Int   *rootVote = NULL, *leafVote = NULL;
    /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
    if (balance) {
      PetscRandom r;

      PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
      PetscCall(PetscRandomSetInterval(r, 0, 2467*size));
      PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
      PetscCall(VecSetSizes(shifts, numShifts, numShifts));
      PetscCall(VecSetType(shifts, VECSTANDARD));
      PetscCall(VecSetRandom(shifts, r));
      PetscCall(PetscRandomDestroy(&r));
      PetscCall(VecGetArrayRead(shifts, &shift));
    }

    PetscCall(PetscMalloc1(nroots, &rootVote));
    PetscCall(PetscMalloc1(nleaves, &leafVote));
    /* Point ownership vote: Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      if (shiftDebug) {
        PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size));
      }
      /* Either put in a bid or we know we own it */
      leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
      leafVote[p].rank = rank;
      leafVote[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootVote[p].vote  = -3;
      rootVote[p].rank  = -3;
      rootVote[p].index = -3;
    }
    /* Reduce votes to roots using the custom MaxLocCarry op over 3-int tuples */
    PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
    PetscCallMPI(MPI_Type_commit(&datatype));
    PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
    PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
    PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
    PetscCallMPI(MPI_Op_free(&op));
    PetscCallMPI(MPI_Type_free(&datatype));
    /* Keep only the winning (rank, index) pair per root; the vote itself is discarded */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].rank = rootVote[p].rank;
      rootNodes[p].index = rootVote[p].index;
    }
    PetscCall(PetscFree(leafVote));
    PetscCall(PetscFree(rootVote));
  } else {
    /* No vote: every root claims itself; leaves matching our rank record their new local id */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank = rank;
    }
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  /* Tell every leaf who owns its point */
  PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));

  /* Leaves owned by another rank become ghost points of the new point SF */
  for (npointLeaves = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) npointLeaves++;
  }
  PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
  PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
      pointLocal[idx] = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  if (shift) {
    PetscCall(VecRestoreArrayRead(shifts, &shift));
    PetscCall(VecDestroy(&shifts));
  }
  if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT));
  PetscCall(PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF));
  PetscCall(PetscSFSetFromOptions(*pointSF));
  /* The SF takes ownership of pointLocal and pointRemote (PETSC_OWN_POINTER) */
  PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
  PetscCall(PetscFree2(rootNodes, leafNodes));
  PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0));
  PetscFunctionReturn(0);
}
1469 
1470 /*@C
1471   DMPlexMigrate  - Migrates internal DM data over the supplied star forest
1472 
1473   Collective on dm
1474 
1475   Input Parameters:
1476 + dm       - The source DMPlex object
1477 - sf       - The star forest communication context describing the migration pattern
1478 
1479   Output Parameter:
1480 . targetDM - The target DMPlex object
1481 
1482   Level: intermediate
1483 
1484 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
1485 @*/
1486 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1487 {
1488   MPI_Comm               comm;
1489   PetscInt               dim, cdim, nroots;
1490   PetscSF                sfPoint;
1491   ISLocalToGlobalMapping ltogMigration;
1492   ISLocalToGlobalMapping ltogOriginal = NULL;
1493   PetscBool              flg;
1494 
1495   PetscFunctionBegin;
1496   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1497   PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1498   PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
1499   PetscCall(DMGetDimension(dm, &dim));
1500   PetscCall(DMSetDimension(targetDM, dim));
1501   PetscCall(DMGetCoordinateDim(dm, &cdim));
1502   PetscCall(DMSetCoordinateDim(targetDM, cdim));
1503 
1504   /* Check for a one-to-all distribution pattern */
1505   PetscCall(DMGetPointSF(dm, &sfPoint));
1506   PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1507   if (nroots >= 0) {
1508     IS        isOriginal;
1509     PetscInt  n, size, nleaves;
1510     PetscInt  *numbering_orig, *numbering_new;
1511 
1512     /* Get the original point numbering */
1513     PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1514     PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1515     PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1516     PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1517     /* Convert to positive global numbers */
1518     for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
1519     /* Derive the new local-to-global mapping from the old one */
1520     PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1521     PetscCall(PetscMalloc1(nleaves, &numbering_new));
1522     PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1523     PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE));
1524     PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1525     PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig));
1526     PetscCall(ISDestroy(&isOriginal));
1527   } else {
1528     /* One-to-all distribution pattern: We can derive LToG from SF */
1529     PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1530   }
1531   PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
1532   if (flg) {
1533     PetscCall(PetscPrintf(comm, "Point renumbering for DM migration:\n"));
1534     PetscCall(ISLocalToGlobalMappingView(ltogMigration, NULL));
1535   }
1536   /* Migrate DM data to target DM */
1537   PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1538   PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1539   PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1540   PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1541   PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1542   PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1543   PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1544   PetscFunctionReturn(0);
1545 }
1546 
1547 /*@C
1548   DMPlexDistribute - Distributes the mesh and any associated sections.
1549 
1550   Collective on dm
1551 
1552   Input Parameters:
1553 + dm  - The original DMPlex object
1554 - overlap - The overlap of partitions, 0 is the default
1555 
1556   Output Parameters:
1557 + sf - The PetscSF used for point distribution, or NULL if not needed
1558 - dmParallel - The distributed DMPlex object
1559 
1560   Note: If the mesh was not distributed, the output dmParallel will be NULL.
1561 
1562   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1563   representation on the mesh.
1564 
1565   Level: intermediate
1566 
1567 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap()
1568 @*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm               comm;
  PetscPartitioner       partitioner;
  IS                     cellPart;
  PetscSection           cellPartSection;
  DM                     dmCoord;
  DMLabel                lblPartition, lblMigration;
  PetscSF                sfMigration, sfStratified, sfPoint;
  PetscBool              flg, balance;
  PetscMPIInt            rank, size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf,3);
  PetscValidPointer(dmParallel,4);

  /* Outputs default to NULL; a serial run returns early with no distribution */
  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0));
  /* Create cell partition: cellPartSection maps target rank -> #cells, cellPart lists the cells */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &cellPartSection));
  PetscCall(DMPlexGetPartitioner(dm, &partitioner));
  PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
  PetscCall(PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0));
  {
    /* Convert partition to DMLabel: stratum = target rank, points = transitive closure of its cells */
    IS             is;
    PetscHSetI     ht;
    const PetscInt *points;
    PetscInt       *iranks;
    PetscInt       pStart, pEnd, proc, npoints, poff = 0, nranks;

    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
    /* Preallocate strata: collect the set of ranks that actually receive cells */
    PetscCall(PetscHSetICreate(&ht));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
    }
    PetscCall(PetscHSetIGetSize(ht, &nranks));
    PetscCall(PetscMalloc1(nranks, &iranks));
    PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
    PetscCall(PetscHSetIDestroy(&ht));
    PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
    PetscCall(PetscFree(iranks));
    /* Inline DMPlexPartitionLabelClosure() */
    PetscCall(ISGetIndices(cellPart, &points));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (!npoints) continue;
      PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
      PetscCall(DMPlexClosurePoints_Private(dm, npoints, points+poff, &is));
      PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
      PetscCall(ISDestroy(&is));
    }
    PetscCall(ISRestoreIndices(cellPart, &points));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0));

  /* Invert the partition label (who gets what) into a migration label (who sends what),
     then build the migration SF and stratify it so points arrive grouped by depth */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
  PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified;
  PetscCall(PetscSFSetUp(sfMigration));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
  if (flg) {
    PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  PetscCall(DMPlexCreate(comm, dmParallel));
  PetscCall(PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));

  /* Build the point SF without overlap; the coordinate DM shares the same point SF */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
  PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(*dmParallel, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  if (flg) PetscCall(PetscSFView(sfPoint, NULL));

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves, noldleaves, l;
    const PetscInt    *oldLeaves;
    PetscSFNode       *newRemote, *permRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM; dmParallel is replaced by the overlapped mesh */
    PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
    PetscCall(DMDestroy(dmParallel));
    *dmParallel = dmOverlap;
    if (flg) {
      PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
      PetscCall(PetscSFView(sfOverlap, NULL));
    }

    /* Re-map the migration SF to establish the full migration pattern
       (original serial points -> final overlapped points) by composing with sfOverlap */
    PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
    PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
    PetscCall(PetscMalloc1(nleaves, &newRemote));
    /* oldRemote: original root point mapping to original leaf point
       newRemote: original leaf point mapping to overlapped leaf point */
    if (oldLeaves) {
      /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
      PetscCall(PetscMalloc1(noldleaves, &permRemote));
      for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
      oldRemote = permRemote;
    }
    PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    /* oldRemote aliases the permuted copy we allocated above, so free it here */
    if (oldLeaves) PetscCall(PetscFree(oldRemote));
    PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
    PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
    PetscCall(PetscSFDestroy(&sfOverlap));
    PetscCall(PetscSFDestroy(&sfMigration));
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  PetscCall(DMLabelDestroy(&lblPartition));
  PetscCall(DMLabelDestroy(&lblMigration));
  PetscCall(PetscSectionDestroy(&cellPartSection));
  PetscCall(ISDestroy(&cellPart));
  /* Copy discretization */
  PetscCall(DMCopyDisc(dm, *dmParallel));
  /* Create sfNatural for mapping between the original (natural) and distributed orderings */
  if (dm->useNatural) {
    PetscSection section;

    PetscCall(DMGetLocalSection(dm, &section));
    PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
    PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
  }
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, *dmParallel));
  /* Cleanup: hand the migration SF to the caller if requested, otherwise release it */
  if (sf) {*sf = sfMigration;}
  else    PetscCall(PetscSFDestroy(&sfMigration));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscCall(PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0));
  PetscFunctionReturn(0);
}
1727 
1728 /*@C
1729   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.
1730 
1731   Collective on dm
1732 
1733   Input Parameters:
1734 + dm  - The non-overlapping distributed DMPlex object
1735 - overlap - The overlap of partitions (the same on all ranks)
1736 
1737   Output Parameters:
1738 + sf - The PetscSF used for point distribution
1739 - dmOverlap - The overlapping distributed DMPlex object, or NULL
1740 
1741   Notes:
1742   If the mesh was not distributed, the return value is NULL.
1743 
1744   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1745   representation on the mesh.
1746 
1747   Level: advanced
1748 
1749 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap()
1750 @*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm               comm;
  PetscMPIInt            size, rank;
  PetscSection           rootSection, leafSection;
  IS                     rootrank, leafrank;
  DM                     dmCoord;
  DMLabel                lblOverlap;
  PetscSF                sfOverlap, sfStratified, sfPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  /* Outputs default to NULL; a serial mesh has no overlap to build */
  if (sf) *sf = NULL;
  *dmOverlap  = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  /* Compute point overlap with neighbouring processes on the distributed DM */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  /* Convert overlap label to stratified migration SF (points arrive grouped by depth) */
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
  PetscCall(PetscSFDestroy(&sfOverlap));
  sfOverlap = sfStratified;
  PetscCall(PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF"));
  PetscCall(PetscSFSetFromOptions(sfOverlap));

  /* Ownership scratch data is no longer needed once the overlap label exists */
  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));

  /* Build the overlapping DM */
  PetscCall(DMPlexCreate(comm, dmOverlap));
  PetscCall(PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
  /* Store the overlap in the new DM; overlaps accumulate over repeated calls */
  ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap;
  /* Build the new point SF; the coordinate DM shares the same point SF */
  PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* Cleanup overlap partition */
  PetscCall(DMLabelDestroy(&lblOverlap));
  /* Hand the overlap migration SF to the caller if requested, otherwise release it */
  if (sf) *sf = sfOverlap;
  else    PetscCall(PetscSFDestroy(&sfOverlap));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1814 
1815 PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
1816 {
1817   DM_Plex        *mesh  = (DM_Plex*) dm->data;
1818 
1819   PetscFunctionBegin;
1820   *overlap = mesh->overlap;
1821   PetscFunctionReturn(0);
1822 }
1823 
1824 /*@
1825   DMPlexGetOverlap - Get the DMPlex partition overlap.
1826 
1827   Not collective
1828 
1829   Input Parameter:
1830 . dm - The DM
1831 
1832   Output Parameter:
1833 . overlap - The overlap of this DM
1834 
1835   Level: intermediate
1836 
1837 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel()
1838 @*/
1839 PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
1840 {
1841   PetscFunctionBegin;
1842   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1843   PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));
1844   PetscFunctionReturn(0);
1845 }
1846 
1847 PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
1848 {
1849   DM_Plex *mesh = (DM_Plex *) dm->data;
1850 
1851   PetscFunctionBegin;
1852   mesh->distDefault = dist;
1853   PetscFunctionReturn(0);
1854 }
1855 
1856 /*@
1857   DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default
1858 
1859   Logically collective
1860 
1861   Input Parameters:
1862 + dm   - The DM
1863 - dist - Flag for distribution
1864 
1865   Level: intermediate
1866 
.seealso: DMPlexDistributeGetDefault(), DMPlexDistribute()
1868 @*/
PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveBool(dm, dist, 2);
  /* Dispatch to the type implementation (DMPlexDistributeSetDefault_Plex);
     PetscTryMethod is a no-op for DM types that do not register the method */
  PetscTryMethod(dm,"DMPlexDistributeSetDefault_C",(DM,PetscBool),(dm,dist));
  PetscFunctionReturn(0);
}
1877 
1878 PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
1879 {
1880   DM_Plex *mesh = (DM_Plex *) dm->data;
1881 
1882   PetscFunctionBegin;
1883   *dist = mesh->distDefault;
1884   PetscFunctionReturn(0);
1885 }
1886 
1887 /*@
1888   DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default
1889 
1890   Not collective
1891 
1892   Input Parameter:
1893 . dm   - The DM
1894 
1895   Output Parameter:
1896 . dist - Flag for distribution
1897 
1898   Level: intermediate
1899 
.seealso: DMPlexDistributeSetDefault(), DMPlexDistribute()
1901 @*/
PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(dist, 2);
  /* Dispatch to the type implementation (DMPlexDistributeGetDefault_Plex);
     PetscUseMethod errors if the DM type did not register the method */
  PetscUseMethod(dm,"DMPlexDistributeGetDefault_C",(DM,PetscBool*),(dm,dist));
  PetscFunctionReturn(0);
}
1910 
1911 /*@C
1912   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
1913   root process of the original's communicator.
1914 
1915   Collective on dm
1916 
1917   Input Parameter:
1918 . dm - the original DMPlex object
1919 
1920   Output Parameters:
1921 + sf - the PetscSF used for point distribution (optional)
1922 - gatherMesh - the gathered DM object, or NULL
1923 
1924   Level: intermediate
1925 
1926 .seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
1927 @*/
1928 PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
1929 {
1930   MPI_Comm       comm;
1931   PetscMPIInt    size;
1932   PetscPartitioner oldPart, gatherPart;
1933 
1934   PetscFunctionBegin;
1935   PetscValidHeaderSpecific(dm,DM_CLASSID,1);
1936   PetscValidPointer(gatherMesh,3);
1937   *gatherMesh = NULL;
1938   if (sf) *sf = NULL;
1939   comm = PetscObjectComm((PetscObject)dm);
1940   PetscCallMPI(MPI_Comm_size(comm,&size));
1941   if (size == 1) PetscFunctionReturn(0);
1942   PetscCall(DMPlexGetPartitioner(dm,&oldPart));
1943   PetscCall(PetscObjectReference((PetscObject)oldPart));
1944   PetscCall(PetscPartitionerCreate(comm,&gatherPart));
1945   PetscCall(PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER));
1946   PetscCall(DMPlexSetPartitioner(dm,gatherPart));
1947   PetscCall(DMPlexDistribute(dm,0,sf,gatherMesh));
1948 
1949   PetscCall(DMPlexSetPartitioner(dm,oldPart));
1950   PetscCall(PetscPartitionerDestroy(&gatherPart));
1951   PetscCall(PetscPartitionerDestroy(&oldPart));
1952   PetscFunctionReturn(0);
1953 }
1954 
1955 /*@C
1956   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
1957 
1958   Collective on dm
1959 
1960   Input Parameter:
1961 . dm - the original DMPlex object
1962 
1963   Output Parameters:
1964 + sf - the PetscSF used for point distribution (optional)
1965 - redundantMesh - the redundant DM object, or NULL
1966 
1967   Level: intermediate
1968 
1969 .seealso: DMPlexDistribute(), DMPlexGetGatherDM()
1970 @*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscInt       pStart, pEnd, p;
  PetscInt       numPoints = -1;
  PetscSF        migrationSF, sfPoint, gatherSF;
  DM             gatherDM, dmCoord;
  PetscSFNode    *points;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(redundantMesh,3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm,&size));
  /* On a single process the mesh is already redundant; return a reference to dm itself */
  if (size == 1) {
    PetscCall(PetscObjectReference((PetscObject) dm));
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(0);
  }
  /* First gather the whole mesh onto the root process */
  PetscCall(DMPlexGetGatherDM(dm,&gatherSF,&gatherDM));
  if (!gatherDM) PetscFunctionReturn(0);
  PetscCallMPI(MPI_Comm_rank(comm,&rank));
  PetscCall(DMPlexGetChart(gatherDM,&pStart,&pEnd));
  numPoints = pEnd - pStart;
  /* Only the root holds the gathered mesh; broadcast its point count to all ranks */
  PetscCallMPI(MPI_Bcast(&numPoints,1,MPIU_INT,0,comm));
  PetscCall(PetscMalloc1(numPoints,&points));
  PetscCall(PetscSFCreate(comm,&migrationSF));
  /* Every rank takes a full copy of the root's points: leaf p -> (rank 0, point p) */
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank  = 0;
  }
  PetscCall(PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER));
  PetscCall(DMPlexCreate(comm, redundantMesh));
  PetscCall(PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh"));
  PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
  PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
  PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* If requested, return the full original->redundant mapping: compose the
     gather SF with the broadcast SF, then stratify into canonical point order */
  if (sf) {
    PetscSF tsf;

    PetscCall(PetscSFCompose(gatherSF,migrationSF,&tsf));
    PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
    PetscCall(PetscSFDestroy(&tsf));
  }
  PetscCall(PetscSFDestroy(&migrationSF));
  PetscCall(PetscSFDestroy(&gatherSF));
  PetscCall(DMDestroy(&gatherDM));
  PetscCall(DMCopyDisc(dm, *redundantMesh));
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, *redundantMesh));
  PetscFunctionReturn(0);
}
2028 
2029 /*@
2030   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.
2031 
2032   Collective
2033 
2034   Input Parameter:
2035 . dm      - The DM object
2036 
2037   Output Parameter:
2038 . distributed - Flag whether the DM is distributed
2039 
2040   Level: intermediate
2041 
2042   Notes:
2043   This currently finds out whether at least two ranks have any DAG points.
2044   This involves MPI_Allreduce() with one integer.
2045   The result is currently not stashed so every call to this routine involves this global communication.
2046 
2047 .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated()
2048 @*/
2049 PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2050 {
2051   PetscInt          pStart, pEnd, count;
2052   MPI_Comm          comm;
2053   PetscMPIInt       size;
2054 
2055   PetscFunctionBegin;
2056   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2057   PetscValidBoolPointer(distributed,2);
2058   PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
2059   PetscCallMPI(MPI_Comm_size(comm,&size));
2060   if (size == 1) { *distributed = PETSC_FALSE; PetscFunctionReturn(0); }
2061   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2062   count = (pEnd - pStart) > 0 ? 1 : 0;
2063   PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2064   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2065   PetscFunctionReturn(0);
2066 }
2067