xref: /petsc/src/dm/impls/plex/plexdistribute.c (revision e0f5bfbec699682fa3e8b8532b1176849ea4e12a)
1 #include <petsc/private/dmpleximpl.h>  /*I      "petscdmplex.h"   I*/
2 #include <petsc/private/dmlabelimpl.h> /*I      "petscdmlabel.h"  I*/
3 
4 /*@C
5   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback
6 
7   Input Parameters:
8 + dm      - The DM object
9 . user    - The user callback, may be NULL (to clear the callback)
10 - ctx     - context for callback evaluation, may be NULL
11 
12   Level: advanced
13 
14   Notes:
15      The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.
16 
17      Any setting here overrides other configuration of DMPlex adjacency determination.
18 
19 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
20 @*/
21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx) {
22   DM_Plex *mesh = (DM_Plex *)dm->data;
23 
24   PetscFunctionBegin;
25   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
26   mesh->useradjacency    = user;
27   mesh->useradjacencyctx = ctx;
28   PetscFunctionReturn(0);
29 }
30 
31 /*@C
32   DMPlexGetAdjacencyUser - get the user-defined adjacency callback
33 
34   Input Parameter:
35 . dm      - The DM object
36 
37   Output Parameters:
38 + user    - The callback
39 - ctx     - context for callback evaluation
40 
41   Level: advanced
42 
43 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
44 @*/
45 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx) {
46   DM_Plex *mesh = (DM_Plex *)dm->data;
47 
48   PetscFunctionBegin;
49   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
50   if (user) *user = mesh->useradjacency;
51   if (ctx) *ctx = mesh->useradjacencyctx;
52   PetscFunctionReturn(0);
53 }
54 
55 /*@
56   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.
57 
58   Input Parameters:
59 + dm      - The DM object
60 - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
61 
62   Level: intermediate
63 
64 .seealso: `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
65 @*/
66 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors) {
67   DM_Plex *mesh = (DM_Plex *)dm->data;
68 
69   PetscFunctionBegin;
70   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
71   mesh->useAnchors = useAnchors;
72   PetscFunctionReturn(0);
73 }
74 
75 /*@
76   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.
77 
78   Input Parameter:
79 . dm      - The DM object
80 
81   Output Parameter:
82 . useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
83 
84   Level: intermediate
85 
86 .seealso: `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
87 @*/
88 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors) {
89   DM_Plex *mesh = (DM_Plex *)dm->data;
90 
91   PetscFunctionBegin;
92   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
93   PetscValidBoolPointer(useAnchors, 2);
94   *useAnchors = mesh->useAnchors;
95   PetscFunctionReturn(0);
96 }
97 
98 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) {
99   const PetscInt *cone   = NULL;
100   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
101 
102   PetscFunctionBeginHot;
103   PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
104   PetscCall(DMPlexGetCone(dm, p, &cone));
105   for (c = 0; c <= coneSize; ++c) {
106     const PetscInt  point   = !c ? p : cone[c - 1];
107     const PetscInt *support = NULL;
108     PetscInt        supportSize, s, q;
109 
110     PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
111     PetscCall(DMPlexGetSupport(dm, point, &support));
112     for (s = 0; s < supportSize; ++s) {
113       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
114         if (support[s] == adj[q]) break;
115       }
116       PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
117     }
118   }
119   *adjSize = numAdj;
120   PetscFunctionReturn(0);
121 }
122 
123 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) {
124   const PetscInt *support = NULL;
125   PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
126 
127   PetscFunctionBeginHot;
128   PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
129   PetscCall(DMPlexGetSupport(dm, p, &support));
130   for (s = 0; s <= supportSize; ++s) {
131     const PetscInt  point = !s ? p : support[s - 1];
132     const PetscInt *cone  = NULL;
133     PetscInt        coneSize, c, q;
134 
135     PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
136     PetscCall(DMPlexGetCone(dm, point, &cone));
137     for (c = 0; c < coneSize; ++c) {
138       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
139         if (cone[c] == adj[q]) break;
140       }
141       PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
142     }
143   }
144   *adjSize = numAdj;
145   PetscFunctionReturn(0);
146 }
147 
148 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[]) {
149   PetscInt *star   = NULL;
150   PetscInt  numAdj = 0, maxAdjSize = *adjSize, starSize, s;
151 
152   PetscFunctionBeginHot;
153   PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
154   for (s = 0; s < starSize * 2; s += 2) {
155     const PetscInt *closure = NULL;
156     PetscInt        closureSize, c, q;
157 
158     PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
159     for (c = 0; c < closureSize * 2; c += 2) {
160       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
161         if (closure[c] == adj[q]) break;
162       }
163       PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
164     }
165     PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure));
166   }
167   PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
168   *adjSize = numAdj;
169   PetscFunctionReturn(0);
170 }
171 
/* Compute all points adjacent to p, dispatching to the user callback (if set) or
   to one of the built-in variants (transitive-closure, cone, support), and
   optionally replace constrained (anchored) points by their anchor points.

   Input:
     dm, p                - mesh and query point
     useCone              - choose cone-based adjacency (or closure direction)
     useTransitiveClosure - choose the transitive-closure variant
     useAnchors           - substitute anchors for constrained points
   Input/Output:
     adjSize - on input the capacity of *adj, or negative to use the computed bound;
               on output the number of adjacent points
     adj     - on input an array of capacity *adjSize, or NULL to have a
               worst-case-sized array allocated (caller must PetscFree it) */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[]) {
  /* NOTE(review): 'asiz' is function-static, so this routine is not thread-safe,
     and the value is refreshed only when this call allocates *adj; if a caller
     passes its own array together with *adjSize < 0, a stale (or zero) capacity
     is used — confirm callers always pair a preallocated array with a
     non-negative *adjSize. */
  static PetscInt asiz       = 0;
  PetscInt        maxAnchors = 1;
  PetscInt        aStart = -1, aEnd = -1;
  PetscInt        maxAdjSize;
  PetscSection    aSec = NULL;
  IS              aIS  = NULL;
  const PetscInt *anchors;
  DM_Plex        *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm, &aSec, &aIS));
    if (aSec) {
      PetscCall(PetscSectionGetMaxDof(aSec, &maxAnchors));
      maxAnchors = PetscMax(1, maxAnchors);
      PetscCall(PetscSectionGetChart(aSec, &aStart, &aEnd));
      PetscCall(ISGetIndices(aIS, &anchors));
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
    PetscCall(DMPlexGetDepth(dm, &depth));
    depth = PetscMax(depth, -depth);
    PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
    /* Geometric-series bounds on worst-case closure/star sizes:
       1 + maxC + maxC^2 + ... + maxC^depth (and likewise for supports) */
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC, depth + 1) - 1) / (maxC - 1)) : depth + 1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS, depth + 1) - 1) / (maxS - 1)) : depth + 1;
    asiz          = PetscMax(PetscPowInt(maxS, depth) * coneSeries, PetscPowInt(maxC, depth) * supportSeries);
    asiz *= maxAnchors;
    /* The adjacency can never exceed the total number of chart points */
    asiz = PetscMin(asiz, pEnd - pStart);
    PetscCall(PetscMalloc1(asiz, adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* Dispatch: a user-set callback overrides every built-in strategy */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    PetscInt  origSize = *adjSize;
    PetscInt  numAdj   = origSize;
    PetscInt  i        = 0, j;
    PetscInt *orig     = *adj;

    /* Remove each constrained point from the adjacency and append its anchor
       points instead, deduplicated against points already in the list. */
    while (i < origSize) {
      PetscInt p    = orig[i]; /* NOTE(review): shadows the argument 'p' — rename candidate */
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) PetscCall(PetscSectionGetDof(aSec, p, &aDof));
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Delete orig[i] by shifting the tail left one slot; do not advance i,
           the shifted-in element is examined next */
        for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec, p, &aOff));
        for (s = 0; s < aDof; ++s) {
          /* Duplicate scan with comma-expression append when the scan exhausts the list */
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
            if (anchors[aOff + s] == orig[q]) break;
          }
          PetscCheck(numAdj <= maxAdjSize, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
        }
      } else {
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS, &anchors));
  }
  PetscFunctionReturn(0);
}
251 
252 /*@
253   DMPlexGetAdjacency - Return all points adjacent to the given point
254 
255   Input Parameters:
256 + dm - The DM object
257 - p  - The point
258 
259   Input/Output Parameters:
260 + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
261             on output the number of adjacent points
262 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
263         on output contains the adjacent points
264 
265   Level: advanced
266 
267   Notes:
268     The user must PetscFree the adj array if it was not passed in.
269 
270 .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
271 @*/
272 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[]) {
273   PetscBool useCone, useClosure, useAnchors;
274 
275   PetscFunctionBeginHot;
276   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
277   PetscValidIntPointer(adjSize, 3);
278   PetscValidPointer(adj, 4);
279   PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
280   PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
281   PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
282   PetscFunctionReturn(0);
283 }
284 
/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on dm

  Input Parameters:
+ dm      - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - Section giving, per root point, the number of leaf ranks sharing it
. rootRanks - The rank of each edge into the root point
. leafRankSection - Section giving, per leaf point, the number of processes sharing it
- leafRanks - The rank of each process sharing a leaf point

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess    - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: `PetscSFCreate()`, `DMPlexCreateProcessSF()`
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess) {
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors; /* one bit per rank in the communicator */
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) PetscValidPointer(processRanks, 7);
  if (sfProcess) PetscValidPointer(sfProcess, 8);
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
  PetscCall(PetscBTCreate(size, &neighbors));
  PetscCall(PetscBTMemzero(size, neighbors));
  /* Compute root-to-leaf process connectivity: mark every rank that has a leaf
     attached to one of our roots */
  PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(rootRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
  }
  PetscCall(ISRestoreIndices(rootRanks, &nranks));
  /* Compute leaf-to-neighbor process connectivity: mark ranks sharing our leaf points */
  PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(leafRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff + n]));
  }
  PetscCall(ISRestoreIndices(leafRanks, &nranks));
  /* Compute leaf-to-root process connectivity: mark the owner rank of each of our leaves */
  for (l = 0; l < numLeaves; ++l) PetscBTSet(neighbors, remotePoints[l].rank);
  /* Calculate edges: a process is never its own neighbor */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
  }
  PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
  PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
  PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
  /* Build one graph edge per neighbor: local point = neighbor rank,
     remote = (our rank as index, neighbor rank as owner) */
  for (proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  PetscCall(PetscBTDestroy(&neighbors));
  /* Ownership of ranksNew transfers to the IS when requested, else it is freed */
  if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
  else PetscCall(PetscFree(ranksNew));
  if (sfProcess) {
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
    PetscCall(PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF"));
    PetscCall(PetscSFSetFromOptions(*sfProcess));
    PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
  }
  PetscFunctionReturn(0);
}
378 
/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on dm

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank    - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank    - The rank of each process sharing a leaf point

  Level: developer

.seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank) {
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMGetPointSF(dm, &sfPoint));
  /* Compute number of leaves for each root: the SF root degree is the dof of rootSection */
  PetscCall(PetscObjectSetName((PetscObject)rootSection, "Root Section"));
  PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
  PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
  PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
  for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]));
  PetscCall(PetscSectionSetUp(rootSection));
  /* Gather rank of each leaf to root: every process contributes its own rank at
     each of its points; the gather packs one entry per incoming SF edge */
  PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
  PetscCall(PetscMalloc1(pEnd - pStart, &myrank));
  PetscCall(PetscMalloc1(nedges, &remoterank));
  for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
  PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscFree(myrank));
  /* remoterank ownership transfers to the IS */
  PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
  /* Distribute remote ranks to leaves, producing leafSection/leafrank */
  PetscCall(PetscObjectSetName((PetscObject)leafSection, "Leaf Section"));
  PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
  PetscFunctionReturn(0);
}
431 
432 #if 0
433 static PetscErrorCode DMPlexCopyOverlapLabels(DM dm, DM ndm)
434 {
435   DM_Plex *mesh  = (DM_Plex *) dm->data;
436   DM_Plex *nmesh = (DM_Plex *) ndm->data;
437 
438   PetscFunctionBegin;
439   if (mesh->numOvLabels) {
440     const char *name;
441     PetscInt    l;
442 
443     nmesh->numOvLabels = mesh->numOvLabels;
444     for (l = 0; l < mesh->numOvLabels; ++l) {
445       PetscCall(PetscObjectGetName((PetscObject) mesh->ovLabels[l], &name));
446       PetscCall(DMGetLabel(ndm, name, &nmesh->ovLabels[l]));
447       nmesh->ovValues[l] = mesh->ovValues[l];
448     }
449     PetscCall(PetscObjectGetName((PetscObject) mesh->ovExLabel, &name));
450     PetscCall(DMGetLabel(ndm, name, &nmesh->ovExLabel));
451     nmesh->ovExValue = mesh->ovExValue;
452   }
453   PetscFunctionReturn(0);
454 }
455 #endif
456 
457 /*@C
458   DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes
459 
460   Collective on dm
461 
462   Input Parameters:
463 + dm          - The DM
464 . levels      - Number of overlap levels
465 . rootSection - The number of leaves for a given root point
466 . rootrank    - The rank of each edge into the root point
467 . leafSection - The number of processes sharing a given leaf point
468 - leafrank    - The rank of each process sharing a leaf point
469 
470   Output Parameter:
471 . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
472 
473   Level: developer
474 
475 .seealso: `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
476 @*/
477 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) {
478   MPI_Comm           comm;
479   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
480   PetscSF            sfPoint;
481   const PetscSFNode *remote;
482   const PetscInt    *local;
483   const PetscInt    *nrank, *rrank;
484   PetscInt          *adj = NULL;
485   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
486   PetscMPIInt        rank, size;
487   PetscBool          flg;
488 
489   PetscFunctionBegin;
490   *ovLabel = NULL;
491   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
492   PetscCallMPI(MPI_Comm_size(comm, &size));
493   PetscCallMPI(MPI_Comm_rank(comm, &rank));
494   if (size == 1) PetscFunctionReturn(0);
495   PetscCall(DMGetPointSF(dm, &sfPoint));
496   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
497   if (!levels) {
498     /* Add owned points */
499     PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
500     for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
501     PetscFunctionReturn(0);
502   }
503   PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
504   PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
505   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
506   /* Handle leaves: shared with the root point */
507   for (l = 0; l < nleaves; ++l) {
508     PetscInt adjSize = PETSC_DETERMINE, a;
509 
510     PetscCall(DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj));
511     for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank));
512   }
513   PetscCall(ISGetIndices(rootrank, &rrank));
514   PetscCall(ISGetIndices(leafrank, &nrank));
515   /* Handle roots */
516   for (p = pStart; p < pEnd; ++p) {
517     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;
518 
519     if ((p >= sStart) && (p < sEnd)) {
520       /* Some leaves share a root with other leaves on different processes */
521       PetscCall(PetscSectionGetDof(leafSection, p, &neighbors));
522       if (neighbors) {
523         PetscCall(PetscSectionGetOffset(leafSection, p, &noff));
524         PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
525         for (n = 0; n < neighbors; ++n) {
526           const PetscInt remoteRank = nrank[noff + n];
527 
528           if (remoteRank == rank) continue;
529           for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
530         }
531       }
532     }
533     /* Roots are shared with leaves */
534     PetscCall(PetscSectionGetDof(rootSection, p, &neighbors));
535     if (!neighbors) continue;
536     PetscCall(PetscSectionGetOffset(rootSection, p, &noff));
537     PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
538     for (n = 0; n < neighbors; ++n) {
539       const PetscInt remoteRank = rrank[noff + n];
540 
541       if (remoteRank == rank) continue;
542       for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
543     }
544   }
545   PetscCall(PetscFree(adj));
546   PetscCall(ISRestoreIndices(rootrank, &rrank));
547   PetscCall(ISRestoreIndices(leafrank, &nrank));
548   /* Add additional overlap levels */
549   for (l = 1; l < levels; l++) {
550     /* Propagate point donations over SF to capture remote connections */
551     PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank));
552     /* Add next level of point donations to the label */
553     PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank));
554   }
555   /* We require the closure in the overlap */
556   PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
557   PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
558   if (flg) {
559     PetscViewer viewer;
560     PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
561     PetscCall(DMLabelView(ovAdjByRank, viewer));
562   }
563   /* Invert sender to receiver label */
564   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
565   PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
566   /* Add owned points, except for shared local points */
567   for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
568   for (l = 0; l < nleaves; ++l) {
569     PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
570     PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
571   }
572   /* Clean up */
573   PetscCall(DMLabelDestroy(&ovAdjByRank));
574   PetscFunctionReturn(0);
575 }
576 
577 static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank) {
578   PetscInt neighbors, el;
579 
580   PetscFunctionBegin;
581   PetscCall(PetscSectionGetDof(section, p, &neighbors));
582   if (neighbors) {
583     PetscInt   *adj     = NULL;
584     PetscInt    adjSize = PETSC_DETERMINE, noff, n, a;
585     PetscMPIInt rank;
586 
587     PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
588     PetscCall(PetscSectionGetOffset(section, p, &noff));
589     PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
590     for (n = 0; n < neighbors; ++n) {
591       const PetscInt remoteRank = ranks[noff + n];
592 
593       if (remoteRank == rank) continue;
594       for (a = 0; a < adjSize; ++a) {
595         PetscBool insert = PETSC_TRUE;
596 
597         for (el = 0; el < numExLabels; ++el) {
598           PetscInt exVal;
599           PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
600           if (exVal == exValue[el]) {
601             insert = PETSC_FALSE;
602             break;
603           }
604         }
605         if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
606       }
607     }
608     PetscCall(PetscFree(adj));
609   }
610   PetscFunctionReturn(0);
611 }
612 
613 /*@C
614   DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes
615 
616   Collective on dm
617 
618   Input Parameters:
619 + dm          - The DM
620 . numLabels   - The number of labels to draw candidate points from
621 . label       - An array of labels containing candidate points
622 . value       - An array of label values marking the candidate points
623 . numExLabels - The number of labels to use for exclusion
624 . exLabel     - An array of labels indicating points to be excluded, or NULL
625 . exValue     - An array of label values to be excluded, or NULL
626 . rootSection - The number of leaves for a given root point
627 . rootrank    - The rank of each edge into the root point
628 . leafSection - The number of processes sharing a given leaf point
629 - leafrank    - The rank of each process sharing a leaf point
630 
631   Output Parameter:
632 . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings
633 
634   Note:
635   The candidate points are only added to the overlap if they are adjacent to a shared point
636 
637   Level: developer
638 
639 .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
640 @*/
641 PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) {
642   MPI_Comm           comm;
643   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
644   PetscSF            sfPoint;
645   const PetscSFNode *remote;
646   const PetscInt    *local;
647   const PetscInt    *nrank, *rrank;
648   PetscInt          *adj = NULL;
649   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
650   PetscMPIInt        rank, size;
651   PetscBool          flg;
652 
653   PetscFunctionBegin;
654   *ovLabel = NULL;
655   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
656   PetscCallMPI(MPI_Comm_size(comm, &size));
657   PetscCallMPI(MPI_Comm_rank(comm, &rank));
658   if (size == 1) PetscFunctionReturn(0);
659   PetscCall(DMGetPointSF(dm, &sfPoint));
660   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
661   PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd));
662   PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote));
663   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank));
664   PetscCall(ISGetIndices(rootrank, &rrank));
665   PetscCall(ISGetIndices(leafrank, &nrank));
666   for (l = 0; l < numLabels; ++l) {
667     IS              valIS;
668     const PetscInt *points;
669     PetscInt        n;
670 
671     PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS));
672     if (!valIS) continue;
673     PetscCall(ISGetIndices(valIS, &points));
674     PetscCall(ISGetLocalSize(valIS, &n));
675     for (PetscInt i = 0; i < n; ++i) {
676       const PetscInt p = points[i];
677 
678       if ((p >= sStart) && (p < sEnd)) {
679         PetscInt loc, adjSize = PETSC_DETERMINE;
680 
681         /* Handle leaves: shared with the root point */
682         if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc));
683         else loc = (p >= 0 && p < nleaves) ? p : -1;
684         if (loc >= 0) {
685           const PetscInt remoteRank = remote[loc].rank;
686 
687           PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj));
688           for (PetscInt a = 0; a < adjSize; ++a) {
689             PetscBool insert = PETSC_TRUE;
690 
691             for (el = 0; el < numExLabels; ++el) {
692               PetscInt exVal;
693               PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal));
694               if (exVal == exValue[el]) {
695                 insert = PETSC_FALSE;
696                 break;
697               }
698             }
699             if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank));
700           }
701         }
702         /* Some leaves share a root with other leaves on different processes */
703         HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank);
704       }
705       /* Roots are shared with leaves */
706       HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank);
707     }
708     PetscCall(ISRestoreIndices(valIS, &points));
709     PetscCall(ISDestroy(&valIS));
710   }
711   PetscCall(PetscFree(adj));
712   PetscCall(ISRestoreIndices(rootrank, &rrank));
713   PetscCall(ISRestoreIndices(leafrank, &nrank));
714   /* We require the closure in the overlap */
715   PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank));
716   PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg));
717   if (flg) {
718     PetscViewer viewer;
719     PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer));
720     PetscCall(DMLabelView(ovAdjByRank, viewer));
721   }
722   /* Invert sender to receiver label */
723   PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel));
724   PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel));
725   /* Add owned points, except for shared local points */
726   for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank));
727   for (l = 0; l < nleaves; ++l) {
728     PetscCall(DMLabelClearValue(*ovLabel, local[l], rank));
729     PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank));
730   }
731   /* Clean up */
732   PetscCall(DMLabelDestroy(&ovAdjByRank));
733   PetscFunctionReturn(0);
734 }
735 
736 /*@C
737   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF
738 
739   Collective on dm
740 
741   Input Parameters:
742 + dm          - The DM
743 - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes
744 
745   Output Parameter:
746 . migrationSF - An SF that maps original points in old locations to points in new locations
747 
748   Level: developer
749 
750 .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
751 @*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF) {
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets:
     label every local point with its depth and push that depth along the overlap SF,
     so this rank learns the depth of every incoming overlap point. */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
  }
  /* -1 marks "depth not received"; overwritten by the broadcast below */
  for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx));
  for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
  for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
  /* Each stratum is shifted by the number of incoming points inserted into the strata
     that precede it in the Plex numbering order cells (dim), vertices (0), faces (dim-1),
     edges (1).
     NOTE(review): this indexes depthRecv with remoteDepths[p] above, so it assumes every
     overlap leaf received a valid depth (>= 0) -- confirm with the overlap SF construction. */
  depthShift[dim] = 0;
  for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
  for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
  for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    /* depthIdx[d] = next free slot in stratum d; advanced once per point placed */
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Build an SF describing the full overlap migration: every existing local point
     plus the incoming overlap points, stratified by depth */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves;
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves, shifted to their new stratified positions */
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; p++) {
      point                = p + depthShift[d];
      ilocal[point]        = point;
      iremote[point].index = p;
      iremote[point].rank  = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points, preserving the ownership
     recorded in the existing point SF */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d = 0; d < dim + 1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = 0; p < numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point                = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank  = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points after the local points of each stratum
     (depthIdx was advanced past all local points in the loop above) */
  for (p = 0; p < nleaves; p++) {
    point                = depthIdx[remoteDepths[p]];
    ilocal[point]        = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank  = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths, remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* ilocal/iremote ownership passes to the SF (PETSC_OWN_POINTER), so no free here */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(0);
}
844 
845 /*@
846   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.
847 
848   Input Parameters:
849 + dm          - The DM
850 - sf          - A star forest with non-ordered leaves, usually defining a DM point migration
851 
852   Output Parameter:
853 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified
854 
855   Note:
856   This lexicographically sorts by (depth, cellType)
857 
858   Level: developer
859 
860 .seealso: `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
861 @*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF) {
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
  PetscSFNode       *pointDepths, *remoteDepths;
  PetscInt          *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscInt          *ctRecv, *ctShift, *ctIdx;
  const PetscSFNode *iremote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMPlexGetDepth(dm, &ldepth));
  PetscCall(DMGetDimension(dm, &dim));
  PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
  /* A rank may be empty (ldepth < 0); otherwise its depth must equal the global maximum */
  PetscCheck(!(ldepth >= 0) || !(depth != ldepth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
  PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0));

  /* Before building the migration SF we need to know the new stratum offsets.
     Pack (depth, cell type) per local point into an SFNode (index = depth,
     rank = DMPolytopeType) and push it to the leaves of sf. */
  PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d = 0; d < depth + 1; ++d) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; ++p) {
      DMPolytopeType ct;

      PetscCall(DMPlexGetCellType(dm, p, &ct));
      pointDepths[p].index = d;
      pointDepths[p].rank  = ct;
    }
  }
  /* -1 marks leaves with no received (depth, cell type) pair */
  for (p = 0; p < nleaves; ++p) {
    remoteDepths[p].index = -1;
    remoteDepths[p].rank  = -1;
  }
  PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE));
  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
  for (p = 0; p < nleaves; ++p) {
    if (remoteDepths[p].rank < 0) {
      /* No valid cell type received: bucket by depth only */
      ++depthRecv[remoteDepths[p].index];
    } else {
      ++ctRecv[remoteDepths[p].rank];
    }
  }
  {
    PetscInt depths[4], dims[4], shift = 0, i, c;

    /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
         Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
     */
    depths[0] = depth;
    depths[1] = 0;
    depths[2] = depth - 1;
    depths[3] = 1;
    dims[0]   = dim;
    dims[1]   = 0;
    dims[2]   = dim - 1;
    dims[3]   = 1;
    for (i = 0; i <= depth; ++i) {
      const PetscInt dep = depths[i];
      const PetscInt dim = dims[i]; /* intentionally shadows the outer dim within this body */

      /* Within each stratum, points with a matching polytope type come first,
         then the untyped (depth-only) points */
      for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
        if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
      depthShift[dep] = shift;
      shift += depthRecv[dep];
    }
    /* Polytope types whose dimension falls outside [0, dim] are placed at the end */
    for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
      const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);

      if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
    }
  }
  /* Derive a new local permutation based on stratified indices */
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  for (p = 0; p < nleaves; ++p) {
    const PetscInt       dep = remoteDepths[p].index;
    const DMPolytopeType ct  = (DMPolytopeType)remoteDepths[p].rank;

    if ((PetscInt)ct < 0) {
      ilocal[p] = depthShift[dep] + depthIdx[dep];
      ++depthIdx[dep];
    } else {
      ilocal[p] = ctShift[ct] + ctIdx[ct];
      ++ctIdx[ct];
    }
  }
  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject)*migrationSF, "Migration SF"));
  /* ilocal is handed off to the new SF; iremote still belongs to sf, so it is copied */
  PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES));
  PetscCall(PetscFree2(pointDepths, remoteDepths));
  PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
  PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
968 
969 /*@
970   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
971 
972   Collective on dm
973 
974   Input Parameters:
975 + dm - The DMPlex object
976 . pointSF - The PetscSF describing the communication pattern
977 . originalSection - The PetscSection for existing data layout
978 - originalVec - The existing data in a local vector
979 
980   Output Parameters:
+ newSection - The PetscSection describing the new data layout
982 - newVec - The new data in a local vector
983 
984   Level: developer
985 
986 .seealso: `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
987 @*/
988 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec) {
989   PetscSF      fieldSF;
990   PetscInt    *remoteOffsets, fieldSize;
991   PetscScalar *originalValues, *newValues;
992 
993   PetscFunctionBegin;
994   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
995   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
996 
997   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
998   PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE));
999   PetscCall(VecSetType(newVec, dm->vectype));
1000 
1001   PetscCall(VecGetArray(originalVec, &originalValues));
1002   PetscCall(VecGetArray(newVec, &newValues));
1003   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1004   PetscCall(PetscFree(remoteOffsets));
1005   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1006   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE));
1007   PetscCall(PetscSFDestroy(&fieldSF));
1008   PetscCall(VecRestoreArray(newVec, &newValues));
1009   PetscCall(VecRestoreArray(originalVec, &originalValues));
1010   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1011   PetscFunctionReturn(0);
1012 }
1013 
1014 /*@
1015   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
1016 
1017   Collective on dm
1018 
1019   Input Parameters:
1020 + dm - The DMPlex object
1021 . pointSF - The PetscSF describing the communication pattern
1022 . originalSection - The PetscSection for existing data layout
1023 - originalIS - The existing data
1024 
1025   Output Parameters:
+ newSection - The PetscSection describing the new data layout
1027 - newIS - The new data
1028 
1029   Level: developer
1030 
1031 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1032 @*/
1033 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS) {
1034   PetscSF         fieldSF;
1035   PetscInt       *newValues, *remoteOffsets, fieldSize;
1036   const PetscInt *originalValues;
1037 
1038   PetscFunctionBegin;
1039   PetscCall(PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0));
1040   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1041 
1042   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1043   PetscCall(PetscMalloc1(fieldSize, &newValues));
1044 
1045   PetscCall(ISGetIndices(originalIS, &originalValues));
1046   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1047   PetscCall(PetscFree(remoteOffsets));
1048   PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1049   PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE));
1050   PetscCall(PetscSFDestroy(&fieldSF));
1051   PetscCall(ISRestoreIndices(originalIS, &originalValues));
1052   PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS));
1053   PetscCall(PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0));
1054   PetscFunctionReturn(0);
1055 }
1056 
1057 /*@
1058   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution
1059 
1060   Collective on dm
1061 
1062   Input Parameters:
1063 + dm - The DMPlex object
1064 . pointSF - The PetscSF describing the communication pattern
1065 . originalSection - The PetscSection for existing data layout
1066 . datatype - The type of data
1067 - originalData - The existing data
1068 
1069   Output Parameters:
1070 + newSection - The PetscSection describing the new data layout
1071 - newData - The new data
1072 
1073   Level: developer
1074 
1075 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`
1076 @*/
1077 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData) {
1078   PetscSF     fieldSF;
1079   PetscInt   *remoteOffsets, fieldSize;
1080   PetscMPIInt dataSize;
1081 
1082   PetscFunctionBegin;
1083   PetscCall(PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0));
1084   PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection));
1085 
1086   PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize));
1087   PetscCallMPI(MPI_Type_size(datatype, &dataSize));
1088   PetscCall(PetscMalloc(fieldSize * dataSize, newData));
1089 
1090   PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF));
1091   PetscCall(PetscFree(remoteOffsets));
1092   PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1093   PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE));
1094   PetscCall(PetscSFDestroy(&fieldSF));
1095   PetscCall(PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0));
1096   PetscFunctionReturn(0);
1097 }
1098 
/* Distribute the cone section and cone/orientation data of dm into dmParallel along
   migrationSF, renumbering cone entries from old local numbering (optionally via the
   'original' local-to-global map) into the new local numbering given by 'renumbering',
   then symmetrize and stratify the parallel mesh. */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel) {
  DM_Plex     *pmesh = (DM_Plex *)(dmParallel)->data;
  MPI_Comm     comm;
  PetscSF      coneSF;
  PetscSection originalConeSection, newConeSection;
  PetscInt    *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool    flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  /* Allocate cone storage on dmParallel before writing into it below */
  PetscCall(DMSetUp(dmParallel));
  /* Communicate and renumber cones */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    PetscInt numCones;

    /* Translate cone points to global numbering before sending */
    PetscCall(PetscSectionGetStorageSize(originalConeSection, &numCones));
    PetscCall(PetscMalloc1(numCones, &globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    /* Cones are already in global numbering */
    globCones = cones;
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE));
  if (original) PetscCall(PetscFree(globCones));
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* Map received global cone points back to new local numbering; misses become negative (IS_GTOLM_MASK) */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    /* Every cone point must have been found in the renumbering */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {
        valid = PETSC_FALSE;
        PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p));
      }
    }
    PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Orientations use the same layout as the cones, so the same SF moves them */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  /* Copy adjacency configuration to the parallel mesh */
  {
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(0);
}
1180 
1181 static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel) {
1182   MPI_Comm         comm;
1183   DM               cdm, cdmParallel;
1184   PetscSection     originalCoordSection, newCoordSection;
1185   Vec              originalCoordinates, newCoordinates;
1186   PetscInt         bs;
1187   const char      *name;
1188   const PetscReal *maxCell, *Lstart, *L;
1189 
1190   PetscFunctionBegin;
1191   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1192   PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);
1193 
1194   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1195   PetscCall(DMGetCoordinateDM(dm, &cdm));
1196   PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
1197   PetscCall(DMCopyDisc(cdm, cdmParallel));
1198   PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
1199   PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
1200   PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
1201   if (originalCoordinates) {
1202     PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1203     PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1204     PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1205 
1206     PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1207     PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
1208     PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1209     PetscCall(VecSetBlockSize(newCoordinates, bs));
1210     PetscCall(VecDestroy(&newCoordinates));
1211   }
1212 
1213   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1214   PetscCall(DMGetPeriodicity(dm, &maxCell, &Lstart, &L));
1215   PetscCall(DMSetPeriodicity(dmParallel, maxCell, Lstart, L));
1216   PetscCall(DMGetCellCoordinateDM(dm, &cdm));
1217   if (cdm) {
1218     PetscCall(DMClone(dmParallel, &cdmParallel));
1219     PetscCall(DMSetCellCoordinateDM(dmParallel, cdmParallel));
1220     PetscCall(DMCopyDisc(cdm, cdmParallel));
1221     PetscCall(DMDestroy(&cdmParallel));
1222     PetscCall(DMGetCellCoordinateSection(dm, &originalCoordSection));
1223     PetscCall(DMGetCellCoordinatesLocal(dm, &originalCoordinates));
1224     PetscCall(PetscSectionCreate(comm, &newCoordSection));
1225     if (originalCoordinates) {
1226       PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
1227       PetscCall(PetscObjectGetName((PetscObject)originalCoordinates, &name));
1228       PetscCall(PetscObjectSetName((PetscObject)newCoordinates, name));
1229 
1230       PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
1231       PetscCall(VecGetBlockSize(originalCoordinates, &bs));
1232       PetscCall(VecSetBlockSize(newCoordinates, bs));
1233       PetscCall(DMSetCellCoordinateSection(dmParallel, bs, newCoordSection));
1234       PetscCall(DMSetCellCoordinatesLocal(dmParallel, newCoordinates));
1235       PetscCall(VecDestroy(&newCoordinates));
1236     }
1237     PetscCall(PetscSectionDestroy(&newCoordSection));
1238   }
1239   PetscFunctionReturn(0);
1240 }
1241 
/* Distribute all labels of dm onto dmParallel along migrationSF. The "depth" label
   is normally recreated by stratification and skipped; it is only communicated when
   some rank has modified it since the mesh was stratified. This routine is collective:
   the Bcast/Allreduce sequence must execute identically on every rank. */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel) {
  DM_Plex         *mesh = (DM_Plex *)dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels  = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* If the user has changed the depth label, communicate it instead */
  PetscCall(DMPlexGetDepth(dm, &depth));
  PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
  if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject)depthLabel, &depthState));
  /* Compare against the state recorded at stratification; any rank detecting a change triggers the send */
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
  if (sendDepth) {
    /* Remove the automatically created depth label so the distributed one replaces it */
    PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
    PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
  }
  /* Everyone must have either the same number of labels, or none */
  PetscCall(DMGetNumLabels(dm, &numLocalLabels));
  numLabels = numLocalLabels;
  PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput     = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      PetscCall(DMGetLabelByNum(dm, l, &label));
      /* Skip "depth" because it is recreated */
      PetscCall(PetscObjectGetName((PetscObject)label, &name));
      PetscCall(PetscStrcmp(name, "depth", &isDepth));
    }
    /* NOTE(review): isDepth is only assigned above when hasLabels is true; the broadcast
       from rank 0 (where numLabels originates, so hasLabels holds) overwrites it on all
       other ranks -- confirm rank 0 always has labels when numLabels > 0 */
    PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
    if (isDepth && !sendDepth) continue;
    PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
      PetscCheck(!(depth >= 0) || !(gdepth != depth), PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        PetscCall(DMLabelHasStratum(labelNew, d, &has));
        if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
      }
    }
    PetscCall(DMAddLabel(dmParallel, labelNew));
    /* Put the output flag in the new label; it is on only if every rank has it on */
    if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
    PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
    PetscCall(PetscObjectGetName((PetscObject)labelNew, &name));
    PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
    PetscCall(DMLabelDestroy(&labelNew));
  }
  {
    DMLabel ctLabel;

    // Reset label for fast lookup
    PetscCall(DMPlexGetCellTypeLabel(dmParallel, &ctLabel));
    PetscCall(DMLabelMakeAllInvalid_Internal(ctLabel));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1319 
/* Distribute the reference tree and parent/child relationships of dm onto dmParallel
   along migrationSF. Parents are renumbered through the optional 'original'
   local-to-global map and then mapped into the new local numbering ('renumbering'),
   mirroring the renumbering scheme used for cones in DMPlexDistributeCones. */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel) {
  DM_Plex     *mesh  = (DM_Plex *)dm->data;
  DM_Plex     *pmesh = (DM_Plex *)(dmParallel)->data;
  MPI_Comm     comm;
  DM           refTree;
  PetscSection origParentSection, newParentSection;
  PetscInt    *origParents, *origChildIDs;
  PetscBool    flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));

  /* Set up tree */
  PetscCall(DMPlexGetReferenceTree(dm, &refTree));
  PetscCall(DMPlexSetReferenceTree(dmParallel, refTree));
  PetscCall(DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL));
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    /* Distribute the parent section and build a dof-level SF over its entries */
    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection));
    PetscCall(PetscSectionSetChart(newParentSection, pStart, pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
    PetscCall(PetscFree(remoteOffsetsParents));
    PetscCall(PetscSectionGetStorageSize(newParentSection, &newParentSize));
    PetscCall(PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs));
    if (original) {
      PetscInt numParents;

      /* Translate parent points to global numbering before sending */
      PetscCall(PetscSectionGetStorageSize(origParentSection, &numParents));
      PetscCall(PetscMalloc1(numParents, &globParents));
      PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
    } else {
      /* Parents are already in global numbering */
      globParents = origParents;
    }
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE));
    if (original) PetscCall(PetscFree(globParents));
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE));
    /* Map received global parents to new local numbering; misses become negative (IS_GTOLM_MASK) */
    PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
    if (PetscDefined(USE_DEBUG)) {
      /* Every parent must have been found in the renumbering */
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {
          valid = PETSC_FALSE;
          PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));
        }
      }
      PetscCheck(valid, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
    PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg));
    if (flg) {
      PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
      PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
      PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscSFView(parentSF, NULL));
    }
    PetscCall(DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs));
    PetscCall(PetscSectionDestroy(&newParentSection));
    PetscCall(PetscFree2(newParents, newChildIDs));
    PetscCall(PetscSFDestroy(&parentSF));
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}
1394 
/* Build the parallel point SF for 'dmParallel' from a migration SF.
   Ownership of shared points is decided by an MPI_MAXLOC reduction over rank
   bids: the highest rank holding a copy of a point becomes the owner, and all
   other copies become ghost (leaf) points referencing that owner.
   NOTE(review): marked PETSC_UNUSED; DMPlexCreatePointSF() below appears to be
   the live replacement with configurable ownership voting. */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel) {
  PetscMPIInt rank, size;
  MPI_Comm    comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeSF, dm, 0, 0, 0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL));
    PetscCall(PetscMalloc2(numRoots, &rowners, numLeaves, &lowners));
    /* Mark every root as unclaimed (-1) and push that state down to the leaves */
    for (p = 0; p < numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p = 0; p < numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    /* Highest bidding rank wins; broadcast the final owner back to every copy */
    PetscCall(PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE));
    /* Every point must now have a valid owner; count the copies this rank does not own */
    for (p = 0; p < numLeaves; ++p) {
      PetscCheck(lowners[p].rank >= 0 && lowners[p].index >= 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    /* Collect the local ghost points and their remote owners into the point SF graph */
    PetscCall(PetscMalloc1(numGhostPoints, &ghostPoints));
    PetscCall(PetscMalloc1(numGhostPoints, &remotePoints));
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    PetscCall(PetscFree2(rowners, lowners));
    PetscCall(PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER));
    PetscCall(PetscSFSetFromOptions((dmParallel)->sf));
  }
  {
    /* Propagate the adjacency configuration to the parallel DM */
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeSF, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1469 
1470 /*@
1471   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?
1472 
1473   Input Parameters:
1474 + dm - The DMPlex object
1475 - flg - Balance the partition?
1476 
1477   Level: intermediate
1478 
1479 .seealso: `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1480 @*/
1481 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg) {
1482   DM_Plex *mesh = (DM_Plex *)dm->data;
1483 
1484   PetscFunctionBegin;
1485   mesh->partitionBalance = flg;
1486   PetscFunctionReturn(0);
1487 }
1488 
1489 /*@
1490   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?
1491 
1492   Input Parameter:
1493 . dm - The DMPlex object
1494 
1495   Output Parameter:
1496 . flg - Balance the partition?
1497 
1498   Level: intermediate
1499 
1500 .seealso: `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1501 @*/
1502 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg) {
1503   DM_Plex *mesh = (DM_Plex *)dm->data;
1504 
1505   PetscFunctionBegin;
1506   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1507   PetscValidBoolPointer(flg, 2);
1508   *flg = mesh->partitionBalance;
1509   PetscFunctionReturn(0);
1510 }
1511 
/* Payload for the MaxLocCarry reduction: 'vote' is the value being maximized,
   while the winning (rank, index) pair is carried along with it. */
typedef struct {
  PetscInt vote, rank, index;
} Petsc3Int;
1515 
1516 /* MaxLoc, but carry a third piece of information around */
1517 static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype) {
1518   Petsc3Int *a = (Petsc3Int *)inout_;
1519   Petsc3Int *b = (Petsc3Int *)in_;
1520   PetscInt   i, len = *len_;
1521   for (i = 0; i < len; i++) {
1522     if (a[i].vote < b[i].vote) {
1523       a[i].vote  = b[i].vote;
1524       a[i].rank  = b[i].rank;
1525       a[i].index = b[i].index;
1526     } else if (a[i].vote <= b[i].vote) {
1527       if (a[i].rank >= b[i].rank) {
1528         a[i].rank  = b[i].rank;
1529         a[i].index = b[i].index;
1530       }
1531     }
1532   }
1533 }
1534 
1535 /*@C
1536   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration
1537 
1538   Input Parameters:
1539 + dm          - The source DMPlex object
1540 . migrationSF - The star forest that describes the parallel point remapping
1541 - ownership   - Flag causing a vote to determine point ownership
1542 
1543   Output Parameter:
1544 . pointSF     - The star forest describing the point overlap in the remapped DM
1545 
1546   Notes:
1547   Output pointSF is guaranteed to have the array of local indices (leaves) sorted.
1548 
1549   Level: developer
1550 
1551 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1552 @*/
1553 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF) {
1554   PetscMPIInt        rank, size;
1555   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1556   PetscInt          *pointLocal;
1557   const PetscInt    *leaves;
1558   const PetscSFNode *roots;
1559   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1560   Vec                shifts;
1561   const PetscInt     numShifts  = 13759;
1562   const PetscScalar *shift      = NULL;
1563   const PetscBool    shiftDebug = PETSC_FALSE;
1564   PetscBool          balance;
1565 
1566   PetscFunctionBegin;
1567   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1568   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
1569   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
1570   PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1571 
1572   PetscCall(DMPlexGetPartitionBalance(dm, &balance));
1573   PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
1574   PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
1575   if (ownership) {
1576     MPI_Op       op;
1577     MPI_Datatype datatype;
1578     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1579     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1580     if (balance) {
1581       PetscRandom r;
1582 
1583       PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
1584       PetscCall(PetscRandomSetInterval(r, 0, 2467 * size));
1585       PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
1586       PetscCall(VecSetSizes(shifts, numShifts, numShifts));
1587       PetscCall(VecSetType(shifts, VECSTANDARD));
1588       PetscCall(VecSetRandom(shifts, r));
1589       PetscCall(PetscRandomDestroy(&r));
1590       PetscCall(VecGetArrayRead(shifts, &shift));
1591     }
1592 
1593     PetscCall(PetscMalloc1(nroots, &rootVote));
1594     PetscCall(PetscMalloc1(nleaves, &leafVote));
1595     /* Point ownership vote: Process with highest rank owns shared points */
1596     for (p = 0; p < nleaves; ++p) {
1597       if (shiftDebug) {
1598         PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1599                                           (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1600       }
1601       /* Either put in a bid or we know we own it */
1602       leafVote[p].vote  = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1603       leafVote[p].rank  = rank;
1604       leafVote[p].index = p;
1605     }
1606     for (p = 0; p < nroots; p++) {
1607       /* Root must not participate in the reduction, flag so that MAXLOC does not use */
1608       rootVote[p].vote  = -3;
1609       rootVote[p].rank  = -3;
1610       rootVote[p].index = -3;
1611     }
1612     PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
1613     PetscCallMPI(MPI_Type_commit(&datatype));
1614     PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
1615     PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
1616     PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
1617     PetscCallMPI(MPI_Op_free(&op));
1618     PetscCallMPI(MPI_Type_free(&datatype));
1619     for (p = 0; p < nroots; p++) {
1620       rootNodes[p].rank  = rootVote[p].rank;
1621       rootNodes[p].index = rootVote[p].index;
1622     }
1623     PetscCall(PetscFree(leafVote));
1624     PetscCall(PetscFree(rootVote));
1625   } else {
1626     for (p = 0; p < nroots; p++) {
1627       rootNodes[p].index = -1;
1628       rootNodes[p].rank  = rank;
1629     }
1630     for (p = 0; p < nleaves; p++) {
1631       /* Write new local id into old location */
1632       if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1633     }
1634   }
1635   PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1636   PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE));
1637 
1638   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1639     if (leafNodes[p].rank != rank) npointLeaves++;
1640   }
1641   PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
1642   PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
1643   for (idx = 0, p = 0; p < nleaves; p++) {
1644     if (leafNodes[p].rank != rank) {
1645       /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
1646       pointLocal[idx]  = p;
1647       pointRemote[idx] = leafNodes[p];
1648       idx++;
1649     }
1650   }
1651   if (shift) {
1652     PetscCall(VecRestoreArrayRead(shifts, &shift));
1653     PetscCall(VecDestroy(&shifts));
1654   }
1655   if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT));
1656   PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF));
1657   PetscCall(PetscSFSetFromOptions(*pointSF));
1658   PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
1659   PetscCall(PetscFree2(rootNodes, leafNodes));
1660   PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0));
1661   if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE));
1662   PetscFunctionReturn(0);
1663 }
1664 
1665 /*@C
1666   DMPlexMigrate  - Migrates internal DM data over the supplied star forest
1667 
1668   Collective on dm
1669 
1670   Input Parameters:
1671 + dm       - The source DMPlex object
1672 - sf       - The star forest communication context describing the migration pattern
1673 
1674   Output Parameter:
1675 . targetDM - The target DMPlex object
1676 
1677   Level: intermediate
1678 
1679 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1680 @*/
1681 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) {
1682   MPI_Comm               comm;
1683   PetscInt               dim, cdim, nroots;
1684   PetscSF                sfPoint;
1685   ISLocalToGlobalMapping ltogMigration;
1686   ISLocalToGlobalMapping ltogOriginal = NULL;
1687 
1688   PetscFunctionBegin;
1689   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
1690   PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0));
1691   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
1692   PetscCall(DMGetDimension(dm, &dim));
1693   PetscCall(DMSetDimension(targetDM, dim));
1694   PetscCall(DMGetCoordinateDim(dm, &cdim));
1695   PetscCall(DMSetCoordinateDim(targetDM, cdim));
1696 
1697   /* Check for a one-to-all distribution pattern */
1698   PetscCall(DMGetPointSF(dm, &sfPoint));
1699   PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL));
1700   if (nroots >= 0) {
1701     IS        isOriginal;
1702     PetscInt  n, size, nleaves;
1703     PetscInt *numbering_orig, *numbering_new;
1704 
1705     /* Get the original point numbering */
1706     PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal));
1707     PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal));
1708     PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size));
1709     PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1710     /* Convert to positive global numbers */
1711     for (n = 0; n < size; n++) {
1712       if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1713     }
1714     /* Derive the new local-to-global mapping from the old one */
1715     PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL));
1716     PetscCall(PetscMalloc1(nleaves, &numbering_new));
1717     PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1718     PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE));
1719     PetscCall(ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration));
1720     PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt **)&numbering_orig));
1721     PetscCall(ISDestroy(&isOriginal));
1722   } else {
1723     /* One-to-all distribution pattern: We can derive LToG from SF */
1724     PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration));
1725   }
1726   PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration"));
1727   PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view"));
1728   /* Migrate DM data to target DM */
1729   PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM));
1730   PetscCall(DMPlexDistributeLabels(dm, sf, targetDM));
1731   PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM));
1732   PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM));
1733   PetscCall(ISLocalToGlobalMappingDestroy(&ltogOriginal));
1734   PetscCall(ISLocalToGlobalMappingDestroy(&ltogMigration));
1735   PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0));
1736   PetscFunctionReturn(0);
1737 }
1738 
/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Collective on dm

  Input Parameters:
+ dm  - The original DMPlex object
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution, or NULL if not needed
- dmParallel - The distributed DMPlex object

  Note: If the mesh was not distributed, the output dmParallel will be NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) {
  MPI_Comm         comm;
  PetscPartitioner partitioner;
  IS               cellPart;
  PetscSection     cellPartSection;
  DM               dmCoord;
  DMLabel          lblPartition, lblMigration;
  PetscSF          sfMigration, sfStratified, sfPoint;
  PetscBool        flg, balance;
  PetscMPIInt      rank, size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmParallel, 4);

  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* Nothing to distribute on a single process; outputs stay NULL */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0));
  /* Create cell partition */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
  PetscCall(PetscSectionCreate(comm, &cellPartSection));
  PetscCall(DMPlexGetPartitioner(dm, &partitioner));
  PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
  PetscCall(PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0));
  {
    /* Convert partition to DMLabel */
    IS              is;
    PetscHSetI      ht;
    const PetscInt *points;
    PetscInt       *iranks;
    PetscInt        pStart, pEnd, proc, npoints, poff = 0, nranks;

    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
    /* Preallocate strata: collect the set of target ranks that receive any points */
    PetscCall(PetscHSetICreate(&ht));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
    }
    PetscCall(PetscHSetIGetSize(ht, &nranks));
    PetscCall(PetscMalloc1(nranks, &iranks));
    PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
    PetscCall(PetscHSetIDestroy(&ht));
    PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
    PetscCall(PetscFree(iranks));
    /* Inline DMPlexPartitionLabelClosure(): each rank's stratum gets the closure of its cells */
    PetscCall(ISGetIndices(cellPart, &points));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (!npoints) continue;
      PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
      PetscCall(DMPlexClosurePoints_Private(dm, npoints, points + poff, &is));
      PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
      PetscCall(ISDestroy(&is));
    }
    PetscCall(ISRestoreIndices(cellPart, &points));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0));

  /* Invert the partition label (who gets what) into a migration label (who sends what),
     turn it into an SF, and stratify so the target DM has a proper Hasse diagram order */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
  PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified;
  PetscCall(PetscSFSetUp(sfMigration));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));
  PetscCall(PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg));
  if (flg) {
    PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  PetscCall(DMPlexCreate(comm, dmParallel));
  PetscCall(PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));

  /* Build the point SF without overlap */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
  PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(*dmParallel, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  if (flg) PetscCall(PetscSFView(sfPoint, NULL));

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves, noldleaves, l;
    const PetscInt    *oldLeaves;
    PetscSFNode       *newRemote, *permRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM */
    PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
    PetscCall(DMDestroy(dmParallel));
    *dmParallel = dmOverlap;
    if (flg) {
      PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
      PetscCall(PetscSFView(sfOverlap, NULL));
    }

    /* Re-map the migration SF to establish the full migration pattern */
    PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
    PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
    PetscCall(PetscMalloc1(nleaves, &newRemote));
    /* oldRemote: original root point mapping to original leaf point
       newRemote: original leaf point mapping to overlapped leaf point */
    if (oldLeaves) {
      /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
      /* NOTE(review): assumes oldLeaves[] is a permutation of 0..noldleaves-1 (a dense
         leaf numbering), so permRemote indexing stays in bounds -- verify for sparse SFs */
      PetscCall(PetscMalloc1(noldleaves, &permRemote));
      for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
      oldRemote = permRemote;
    }
    PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE));
    if (oldLeaves) PetscCall(PetscFree(oldRemote));
    PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
    PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
    PetscCall(PetscSFDestroy(&sfOverlap));
    PetscCall(PetscSFDestroy(&sfMigration));
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  PetscCall(DMLabelDestroy(&lblPartition));
  PetscCall(DMLabelDestroy(&lblMigration));
  PetscCall(PetscSectionDestroy(&cellPartSection));
  PetscCall(ISDestroy(&cellPart));
  /* Copy discretization */
  PetscCall(DMCopyDisc(dm, *dmParallel));
  /* Create sfNatural */
  if (dm->useNatural) {
    PetscSection section;

    PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
    PetscCall(DMGetLocalSection(dm, &section));

    /* First DM with useNatural = PETSC_TRUE is considered natural */
    /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
    /* Compose with a previous sfNatural if present */
    if (dm->sfNatural) {
      PetscSF natSF;

      PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &natSF));
      PetscCall(PetscSFCompose(dm->sfNatural, natSF, &(*dmParallel)->sfNatural));
      PetscCall(PetscSFDestroy(&natSF));
    } else {
      PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
    }
    /* Compose with a previous sfMigration if present */
    if (dm->sfMigration) {
      PetscSF naturalPointSF;

      PetscCall(PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF));
      PetscCall(PetscSFDestroy(&sfMigration));
      sfMigration = naturalPointSF;
    }
  }
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
  /* Cleanup: hand the migration SF to the caller if requested, otherwise destroy it */
  if (sf) {
    *sf = sfMigration;
  } else PetscCall(PetscSFDestroy(&sfMigration));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscCall(PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
1939 
/*@C
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

  Collective on dm

  Input Parameters:
+ dm  - The non-overlapping distributed DMPlex object
- overlap - The overlap of partitions (the same on all ranks)

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmOverlap - The overlapping distributed DMPlex object, or NULL

  Notes:
  If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Options Database Keys:
+ -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
. -dm_plex_overlap_values <int1,int2,...>   - List of overlap label values
. -dm_plex_overlap_exclude_label <name>     - Label used to exclude points from overlap
- -dm_plex_overlap_exclude_value <int>      - Label value used to exclude points from overlap

  Level: advanced

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap) {
  DM_Plex     *mesh = (DM_Plex *)dm->data;
  MPI_Comm     comm;
  PetscMPIInt  size, rank;
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;
  DM           dmCoord;
  DMLabel      lblOverlap;
  PetscSF      sfOverlap, sfStratified, sfPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  if (sf) *sf = NULL;
  *dmOverlap = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* Serial mesh: nothing to overlap; outputs stay NULL */
  if (size == 1) PetscFunctionReturn(0);
  {
    // We need to get options for the _already_distributed mesh, so it must be done here
    /* NOTE: this inner 'overlap' deliberately shadows the function parameter; it is
       only used to feed the "dist_"-prefixed options processing below */
    PetscInt    overlap;
    const char *prefix;
    char        oldPrefix[PETSC_MAX_PATH_LEN];

    oldPrefix[0] = '\0';
    PetscCall(PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix));
    /* Save the current prefix (may be NULL; presumably PetscStrcpy then yields "") and
       temporarily append "dist_" so overlap options are read under the distributed prefix */
    PetscCall(PetscStrcpy(oldPrefix, prefix));
    PetscCall(PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_"));
    PetscCall(DMPlexGetOverlap(dm, &overlap));
    PetscObjectOptionsBegin((PetscObject)dm);
    PetscCall(DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap));
    PetscOptionsEnd();
    PetscCall(PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
  }
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  /* Compute point overlap with neighbouring processes on the distributed DM */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  /* Either build the overlap from user-specified labels, or from the adjacency of width 'overlap' */
  if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  /* Convert overlap label to stratified migration SF */
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
  PetscCall(PetscSFDestroy(&sfOverlap));
  sfOverlap = sfStratified;
  PetscCall(PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF"));
  PetscCall(PetscSFSetFromOptions(sfOverlap));

  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0));

  /* Build the overlapping DM */
  PetscCall(DMPlexCreate(comm, dmOverlap));
  PetscCall(PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
  /* Store the overlap in the new DM */
  PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
  /* Build the new point SF */
  PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(DMGetCellCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* Cleanup overlap partition */
  PetscCall(DMLabelDestroy(&lblOverlap));
  if (sf) *sf = sfOverlap;
  else PetscCall(PetscSFDestroy(&sfOverlap));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}
2050 
2051 PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap) {
2052   DM_Plex *mesh = (DM_Plex *)dm->data;
2053 
2054   PetscFunctionBegin;
2055   *overlap = mesh->overlap;
2056   PetscFunctionReturn(0);
2057 }
2058 
2059 PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap) {
2060   DM_Plex *mesh    = NULL;
2061   DM_Plex *meshSrc = NULL;
2062 
2063   PetscFunctionBegin;
2064   PetscValidHeaderSpecificType(dm, DM_CLASSID, 1, DMPLEX);
2065   mesh          = (DM_Plex *)dm->data;
2066   mesh->overlap = overlap;
2067   if (dmSrc) {
2068     PetscValidHeaderSpecificType(dmSrc, DM_CLASSID, 2, DMPLEX);
2069     meshSrc = (DM_Plex *)dmSrc->data;
2070     mesh->overlap += meshSrc->overlap;
2071   }
2072   PetscFunctionReturn(0);
2073 }
2074 
/*@
  DMPlexGetOverlap - Get the width of the cell overlap

  Not collective

  Input Parameter:
. dm   - The DM

  Output Parameter:
. overlap - the width of the cell overlap

  Level: intermediate

.seealso: `DMPlexSetOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap) {
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(overlap, 2);
  /* Dispatch to the type-specific implementation (DMPlexGetOverlap_Plex);
     PetscUseMethod errors if the method is not registered for this DM type */
  PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
  PetscFunctionReturn(0);
}
2097 
/*@
  DMPlexSetOverlap - Set the width of the cell overlap

  Logically collective

  Input Parameters:
+ dm      - The DM
. dmSrc   - The DM that produced this one, or NULL
- overlap - the width of the cell overlap

  Note:
  The overlap from dmSrc is added to dm, so a chain of distributions accumulates
  the total overlap width.

  Level: intermediate

.seealso: `DMPlexGetOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap) {
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 3);
  PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
  PetscFunctionReturn(0);
}
2122 
2123 PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist) {
2124   DM_Plex *mesh = (DM_Plex *)dm->data;
2125 
2126   PetscFunctionBegin;
2127   mesh->distDefault = dist;
2128   PetscFunctionReturn(0);
2129 }
2130 
/*@
  DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default

  Logically collective

  Input Parameters:
+ dm   - The DM
- dist - Flag for distribution

  Level: intermediate

.seealso: `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist) {
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveBool(dm, dist, 2);
  PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
  PetscFunctionReturn(0);
}
2151 
2152 PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist) {
2153   DM_Plex *mesh = (DM_Plex *)dm->data;
2154 
2155   PetscFunctionBegin;
2156   *dist = mesh->distDefault;
2157   PetscFunctionReturn(0);
2158 }
2159 
/*@
  DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default

  Not collective

  Input Parameter:
. dm   - The DM

  Output Parameter:
. dist - Flag for distribution

  Level: intermediate

.seealso: `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist) {
  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(dist, 2);
  PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
  PetscFunctionReturn(0);
}
2182 
2183 /*@C
2184   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
2185   root process of the original's communicator.
2186 
2187   Collective on dm
2188 
2189   Input Parameter:
2190 . dm - the original DMPlex object
2191 
2192   Output Parameters:
2193 + sf - the PetscSF used for point distribution (optional)
2194 - gatherMesh - the gathered DM object, or NULL
2195 
2196   Level: intermediate
2197 
2198 .seealso: `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2199 @*/
2200 PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh) {
2201   MPI_Comm         comm;
2202   PetscMPIInt      size;
2203   PetscPartitioner oldPart, gatherPart;
2204 
2205   PetscFunctionBegin;
2206   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2207   PetscValidPointer(gatherMesh, 3);
2208   *gatherMesh = NULL;
2209   if (sf) *sf = NULL;
2210   comm = PetscObjectComm((PetscObject)dm);
2211   PetscCallMPI(MPI_Comm_size(comm, &size));
2212   if (size == 1) PetscFunctionReturn(0);
2213   PetscCall(DMPlexGetPartitioner(dm, &oldPart));
2214   PetscCall(PetscObjectReference((PetscObject)oldPart));
2215   PetscCall(PetscPartitionerCreate(comm, &gatherPart));
2216   PetscCall(PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER));
2217   PetscCall(DMPlexSetPartitioner(dm, gatherPart));
2218   PetscCall(DMPlexDistribute(dm, 0, sf, gatherMesh));
2219 
2220   PetscCall(DMPlexSetPartitioner(dm, oldPart));
2221   PetscCall(PetscPartitionerDestroy(&gatherPart));
2222   PetscCall(PetscPartitionerDestroy(&oldPart));
2223   PetscFunctionReturn(0);
2224 }
2225 
2226 /*@C
2227   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.
2228 
2229   Collective on dm
2230 
2231   Input Parameter:
2232 . dm - the original DMPlex object
2233 
2234   Output Parameters:
2235 + sf - the PetscSF used for point distribution (optional)
2236 - redundantMesh - the redundant DM object, or NULL
2237 
2238   Level: intermediate
2239 
2240 .seealso: `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2241 @*/
2242 PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh) {
2243   MPI_Comm     comm;
2244   PetscMPIInt  size, rank;
2245   PetscInt     pStart, pEnd, p;
2246   PetscInt     numPoints = -1;
2247   PetscSF      migrationSF, sfPoint, gatherSF;
2248   DM           gatherDM, dmCoord;
2249   PetscSFNode *points;
2250 
2251   PetscFunctionBegin;
2252   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2253   PetscValidPointer(redundantMesh, 3);
2254   *redundantMesh = NULL;
2255   comm           = PetscObjectComm((PetscObject)dm);
2256   PetscCallMPI(MPI_Comm_size(comm, &size));
2257   if (size == 1) {
2258     PetscCall(PetscObjectReference((PetscObject)dm));
2259     *redundantMesh = dm;
2260     if (sf) *sf = NULL;
2261     PetscFunctionReturn(0);
2262   }
2263   PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherDM));
2264   if (!gatherDM) PetscFunctionReturn(0);
2265   PetscCallMPI(MPI_Comm_rank(comm, &rank));
2266   PetscCall(DMPlexGetChart(gatherDM, &pStart, &pEnd));
2267   numPoints = pEnd - pStart;
2268   PetscCallMPI(MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm));
2269   PetscCall(PetscMalloc1(numPoints, &points));
2270   PetscCall(PetscSFCreate(comm, &migrationSF));
2271   for (p = 0; p < numPoints; p++) {
2272     points[p].index = p;
2273     points[p].rank  = 0;
2274   }
2275   PetscCall(PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER));
2276   PetscCall(DMPlexCreate(comm, redundantMesh));
2277   PetscCall(PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh"));
2278   PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
2279   /* This is to express that all point are in overlap */
2280   PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT));
2281   PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
2282   PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
2283   PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
2284   if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
2285   PetscCall(PetscSFDestroy(&sfPoint));
2286   if (sf) {
2287     PetscSF tsf;
2288 
2289     PetscCall(PetscSFCompose(gatherSF, migrationSF, &tsf));
2290     PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
2291     PetscCall(PetscSFDestroy(&tsf));
2292   }
2293   PetscCall(PetscSFDestroy(&migrationSF));
2294   PetscCall(PetscSFDestroy(&gatherSF));
2295   PetscCall(DMDestroy(&gatherDM));
2296   PetscCall(DMCopyDisc(dm, *redundantMesh));
2297   PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
2298   PetscFunctionReturn(0);
2299 }
2300 
2301 /*@
2302   DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.
2303 
2304   Collective
2305 
2306   Input Parameter:
2307 . dm      - The DM object
2308 
2309   Output Parameter:
2310 . distributed - Flag whether the DM is distributed
2311 
2312   Level: intermediate
2313 
2314   Notes:
2315   This currently finds out whether at least two ranks have any DAG points.
2316   This involves MPI_Allreduce() with one integer.
2317   The result is currently not stashed so every call to this routine involves this global communication.
2318 
2319 .seealso: `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2320 @*/
2321 PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed) {
2322   PetscInt    pStart, pEnd, count;
2323   MPI_Comm    comm;
2324   PetscMPIInt size;
2325 
2326   PetscFunctionBegin;
2327   PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
2328   PetscValidBoolPointer(distributed, 2);
2329   PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
2330   PetscCallMPI(MPI_Comm_size(comm, &size));
2331   if (size == 1) {
2332     *distributed = PETSC_FALSE;
2333     PetscFunctionReturn(0);
2334   }
2335   PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
2336   count = (pEnd - pStart) > 0 ? 1 : 0;
2337   PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
2338   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2339   PetscFunctionReturn(0);
2340 }
2341 
2342 /*@C
2343   DMPlexDistributionSetName - Set the name of the specific parallel distribution
2344 
2345   Input Parameters:
2346 + dm   - The DM
2347 - name - The name of the specific parallel distribution
2348 
2349   Note:
2350   If distribution name is set when saving, DMPlexTopologyView() saves the plex's
2351   parallel distribution (i.e., partition, ownership, and local ordering of points) under
2352   this name. Conversely, if distribution name is set when loading, DMPlexTopologyLoad()
2353   loads the parallel distribution stored in file under this name.
2354 
2355   Level: developer
2356 
2357 .seealso: `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2358 @*/
2359 PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[]) {
2360   DM_Plex *mesh = (DM_Plex *)dm->data;
2361 
2362   PetscFunctionBegin;
2363   PetscValidHeaderSpecificType(dm, DM_CLASSID, 1, DMPLEX);
2364   if (name) PetscValidCharPointer(name, 2);
2365   PetscCall(PetscFree(mesh->distributionName));
2366   PetscCall(PetscStrallocpy(name, &mesh->distributionName));
2367   PetscFunctionReturn(0);
2368 }
2369 
2370 /*@C
2371   DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution
2372 
2373   Input Parameter:
2374 . dm - The DM
2375 
2376   Output Parameter:
2377 . name - The name of the specific parallel distribution
2378 
2379   Note:
2380   If distribution name is set when saving, DMPlexTopologyView() saves the plex's
2381   parallel distribution (i.e., partition, ownership, and local ordering of points) under
2382   this name. Conversely, if distribution name is set when loading, DMPlexTopologyLoad()
2383   loads the parallel distribution stored in file under this name.
2384 
2385   Level: developer
2386 
2387 .seealso: `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2388 @*/
2389 PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[]) {
2390   DM_Plex *mesh = (DM_Plex *)dm->data;
2391 
2392   PetscFunctionBegin;
2393   PetscValidHeaderSpecificType(dm, DM_CLASSID, 1, DMPLEX);
2394   PetscValidPointer(name, 2);
2395   *name = mesh->distributionName;
2396   PetscFunctionReturn(0);
2397 }
2398