#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
#include <petsc/private/dmlabelimpl.h> /*I "petscdmlabel.h" I*/

/*@C
  DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  Input Parameters:
+ dm - The DM object
. user - The user callback, may be NULL (to clear the callback)
- ctx - context for callback evaluation, may be NULL

  Level: advanced

  Notes:
  The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

  Any setting here overrides other configuration of DMPlex adjacency determination.

.seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
@*/
PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Store the callback and its context on the mesh; a NULL user restores the
     built-in adjacency determination (see DMPlexGetAdjacency_Internal()) */
  mesh->useradjacency    = user;
  mesh->useradjacencyctx = ctx;
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetAdjacencyUser - get the user-defined adjacency callback

  Input Parameter:
. dm - The DM object

  Output Parameters:
+ user - The user callback
- ctx - context for callback evaluation

  Level: advanced

.seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
@*/
PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Both outputs are optional; pass NULL to skip either one */
  if (user) *user = mesh->useradjacency;
  if (ctx) *ctx = mesh->useradjacencyctx;
  PetscFunctionReturn(0);
}

/*@
  DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

  Input Parameters:
+ dm - The DM object
- useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
@*/
PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  mesh->useAnchors = useAnchors;
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useAnchors - Flag to use the closure. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
@*/
PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(useAnchors, 2);
  *useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

/* Cone-first adjacency: the adjacency of p is the union of the supports of
   p and of every point in cone(p).  Results accumulate (deduplicated) into
   adj[], which the caller sized to at least *adjSize entries. */
static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetConeSize(dm, p, &coneSize));
  PetscCall(DMPlexGetCone(dm, p, &cone));
  /* c == 0 visits p itself; c > 0 visits cone[c-1] */
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt  point   = !c ? p : cone[c-1];
    const PetscInt *support = NULL;
    PetscInt        supportSize, s, q;

    PetscCall(DMPlexGetSupportSize(dm, point, &supportSize));
    PetscCall(DMPlexGetSupport(dm, point, &support));
    for (s = 0; s < supportSize; ++s) {
      /* Linear search for support[s] in adj[0..numAdj); if the search falls
         off the end, the comma expression appends it (and evaluates to 0,
         terminating the loop) -- an insert-if-absent in one loop header */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
        if (support[s] == adj[q]) break;
      }
      /* NOTE(review): the append above stores into adj[numAdj] before this
         bound is checked, so an undersized array is overrun by one entry
         before the error fires -- confirm maxAdjSize is a strict upper bound */
      PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* Support-first adjacency: the adjacency of p is the union of the cones of
   p and of every point in supp(p).  Mirror image of the cone variant above. */
static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *support = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetSupportSize(dm, p, &supportSize));
  PetscCall(DMPlexGetSupport(dm, p, &support));
  /* s == 0 visits p itself; s > 0 visits support[s-1] */
  for (s = 0; s <= supportSize; ++s) {
    const PetscInt point = !s ?
p : support[s-1];
    const PetscInt *cone = NULL;
    PetscInt        coneSize, c, q;

    PetscCall(DMPlexGetConeSize(dm, point, &coneSize));
    PetscCall(DMPlexGetCone(dm, point, &cone));
    for (c = 0; c < coneSize; ++c) {
      /* Insert-if-absent: append cone[c] when the search reaches numAdj */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
        if (cone[c] == adj[q]) break;
      }
      PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* Transitive-closure adjacency: union of closure(star(p)) (useClosure true
   takes the closure of p first, then the star of each member, and vice versa).
   Closure output comes as (point, orientation) pairs, hence the stride-2 loops. */
static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
{
  PetscInt *star = NULL;
  PetscInt  numAdj = 0, maxAdjSize = *adjSize, starSize, s;

  PetscFunctionBeginHot;
  PetscCall(DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star));
  for (s = 0; s < starSize*2; s += 2) {
    const PetscInt *closure = NULL;
    PetscInt        closureSize, c, q;

    PetscCall(DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
    for (c = 0; c < closureSize*2; c += 2) {
      /* Insert-if-absent, as in the cone/support variants */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
        if (closure[c] == adj[q]) break;
      }
      PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
    }
    PetscCall(DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure));
  }
  PetscCall(DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star));
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* Dispatcher for the adjacency variants above, optionally allocating the
   output array and replacing anchored (constrained) points by their anchors. */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): asiz is function-static, so it persists across calls (and is
     shared across DMs/threads).  It is only written when *adj is NULL; a call
     with a caller-provided *adj and *adjSize < 0 would read a stale value --
     confirm callers always pass *adjSize >= 0 with a non-NULL *adj. */
  static PetscInt asiz = 0;
  PetscInt maxAnchors = 1;
  PetscInt aStart = -1, aEnd = -1;
  PetscInt maxAdjSize;
  PetscSection aSec = NULL;
  IS aIS = NULL;
  const PetscInt *anchors;
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBeginHot;
  if (useAnchors) {
    PetscCall(DMPlexGetAnchors(dm,&aSec,&aIS));
    if (aSec) {
      PetscCall(PetscSectionGetMaxDof(aSec,&maxAnchors));
      maxAnchors = PetscMax(1,maxAnchors);
      PetscCall(PetscSectionGetChart(aSec,&aStart,&aEnd));
      PetscCall(ISGetIndices(aIS,&anchors));
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    /* Size a worst-case workspace: geometric series over cone/support sizes
       across all depths, scaled by the anchor fan-out, capped by the chart */
    PetscCall(DMPlexGetChart(dm, &pStart,&pEnd));
    PetscCall(DMPlexGetDepth(dm, &depth));
    depth = PetscMax(depth, -depth);
    PetscCall(DMPlexGetMaxSizes(dm, &maxC, &maxS));
    coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz = PetscMin(asiz,pEnd-pStart);
    PetscCall(PetscMalloc1(asiz,adj));
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* A user callback (DMPlexSetAdjacencyUser()) overrides all built-in modes */
  if (mesh->useradjacency) {
    PetscCall((*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx));
  } else if (useTransitiveClosure) {
    PetscCall(DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj));
  } else if (useCone) {
    PetscCall(DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj));
  } else {
    PetscCall(DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj));
  }
  if (useAnchors && aSec) {
    PetscInt origSize = *adjSize;
    PetscInt numAdj = origSize;
    PetscInt i = 0, j;
    PetscInt *orig = *adj;

    /* Replace each anchored (constrained) point by its anchor points */
    while (i < origSize) {
      PetscInt p = orig[i];
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        PetscCall(PetscSectionGetDof(aSec,p,&aDof));
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Remove the constrained point by shifting the remainder down */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        PetscCall(PetscSectionGetOffset(aSec,p,&aOff));
        /* Append each anchor unless it is already present */
        for (s = 0; s < aDof; ++s) {
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          PetscCheck(numAdj <= maxAdjSize,PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%" PetscInt_FMT ")", maxAdjSize);
        }
      }
      else {
        i++;
      }
    }
    *adjSize = numAdj;
    PetscCall(ISRestoreIndices(aIS,&anchors));
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacency - Return all points adjacent to the given point

  Input Parameters:
+ dm - The DM object
- p - The point

  Input/Output Parameters:
+ adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
            on output the number of adjacent points
- adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
        on output contains the adjacent points

  Level: advanced

  Notes:
  The user must PetscFree the adj array if it was not passed in.

.seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
@*/
PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
{
  PetscBool useCone, useClosure, useAnchors;

  PetscFunctionBeginHot;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(adjSize,3);
  PetscValidPointer(adj,4);
  /* Read the configured adjacency mode, then defer to the internal dispatcher */
  PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
  PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
  PetscCall(DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj));
  PetscFunctionReturn(0);
}

/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on dm

  Input Parameters:
+ dm - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection -
. rootRanks -
. leafRankSection -
- leafRanks -

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: `PetscSFCreate()`, `DMPlexCreateProcessSF()`
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors;
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  /* Both outputs are optional; validate only those requested */
  if (processRanks) {PetscValidPointer(processRanks, 7);}
  if (sfProcess) {PetscValidPointer(sfProcess, 8);}

  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCall(PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints));
  /* A bit per process: set when that rank touches any of our points */
  PetscCall(PetscBTCreate(size, &neighbors));
  PetscCall(PetscBTMemzero(size, neighbors));
  /* Compute root-to-leaf process connectivity */
  PetscCall(PetscSectionGetChart(rootRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(rootRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(rootRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(rootRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(rootRanks, &nranks));
  /* Compute leaf-to-neighbor process connectivity */
  PetscCall(PetscSectionGetChart(leafRankSection, &pStart, &pEnd));
  PetscCall(ISGetIndices(leafRanks, &nranks));
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    PetscCall(PetscSectionGetDof(leafRankSection, p, &ndof));
    PetscCall(PetscSectionGetOffset(leafRankSection, p, &noff));
    for (n = 0; n < ndof; ++n) PetscCall(PetscBTSet(neighbors, nranks[noff+n]));
  }
  PetscCall(ISRestoreIndices(leafRanks, &nranks));
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges: self is never a neighbor */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  PetscCall(PetscMalloc1(numNeighbors, &ranksNew));
  PetscCall(PetscMalloc1(numNeighbors, &localPointsNew));
  PetscCall(PetscMalloc1(numNeighbors, &remotePointsNew));
  /* Each edge is a leaf at local index 'proc' pointing to (proc, rank) */
  for (proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  PetscCall(PetscBTDestroy(&neighbors));
  /* Ownership of the new arrays transfers to the IS/SF when requested */
  if (processRanks) PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks));
  else PetscCall(PetscFree(ranksNew));
  if (sfProcess) {
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess));
    PetscCall(PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF"));
    PetscCall(PetscSFSetFromOptions(*sfProcess));
    PetscCall(PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER));
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on dm

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
.
leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Level: developer

.seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  PetscCall(DMGetPointSF(dm, &sfPoint));
  /* Compute number of leaves for each root */
  PetscCall(PetscObjectSetName((PetscObject) rootSection, "Root Section"));
  PetscCall(PetscSectionSetChart(rootSection, pStart, pEnd));
  PetscCall(PetscSFComputeDegreeBegin(sfPoint, &rootdegree));
  PetscCall(PetscSFComputeDegreeEnd(sfPoint, &rootdegree));
  for (p = pStart; p < pEnd; ++p) PetscCall(PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]));
  PetscCall(PetscSectionSetUp(rootSection));
  /* Gather rank of each leaf to root: every leaf sends its own rank */
  PetscCall(PetscSectionGetStorageSize(rootSection, &nedges));
  PetscCall(PetscMalloc1(pEnd-pStart, &myrank));
  PetscCall(PetscMalloc1(nedges, &remoterank));
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  PetscCall(PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank));
  PetscCall(PetscFree(myrank));
  /* remoterank ownership transfers to the IS */
  PetscCall(ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank));
  /* Distribute remote ranks to leaves */
  PetscCall(PetscObjectSetName((PetscObject) leafSection, "Leaf Section"));
  PetscCall(DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank));
  PetscFunctionReturn(0);
}

#if 0
/* Currently disabled: copy overlap-label configuration from dm to ndm */
static PetscErrorCode DMPlexCopyOverlapLabels(DM dm, DM ndm)
{
  DM_Plex *mesh  = (DM_Plex *) dm->data;
  DM_Plex *nmesh = (DM_Plex *) ndm->data;

  PetscFunctionBegin;
  if (mesh->numOvLabels) {
    const char *name;
    PetscInt    l;

    nmesh->numOvLabels = mesh->numOvLabels;
    for (l = 0; l < mesh->numOvLabels; ++l) {
      PetscCall(PetscObjectGetName((PetscObject) mesh->ovLabels[l], &name));
      PetscCall(DMGetLabel(ndm, name, &nmesh->ovLabels[l]));
      nmesh->ovValues[l] = mesh->ovValues[l];
    }
    PetscCall(PetscObjectGetName((PetscObject) mesh->ovExLabel, &name));
    PetscCall(DMGetLabel(ndm, name, &nmesh->ovExLabel));
    nmesh->ovExValue = mesh->ovExValue;
  }
  PetscFunctionReturn(0);
}
#endif

/*@C
  DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes

  Collective on dm

  Input Parameters:
+ dm - The DM
. levels - Number of overlap levels
. rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Output Parameter:
.
ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 486 487 Level: developer 488 489 .seealso: `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()` 490 @*/ 491 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 492 { 493 MPI_Comm comm; 494 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 495 PetscSF sfPoint; 496 const PetscSFNode *remote; 497 const PetscInt *local; 498 const PetscInt *nrank, *rrank; 499 PetscInt *adj = NULL; 500 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l; 501 PetscMPIInt rank, size; 502 PetscBool flg; 503 504 PetscFunctionBegin; 505 *ovLabel = NULL; 506 PetscCall(PetscObjectGetComm((PetscObject) dm, &comm)); 507 PetscCallMPI(MPI_Comm_size(comm, &size)); 508 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 509 if (size == 1) PetscFunctionReturn(0); 510 PetscCall(DMGetPointSF(dm, &sfPoint)); 511 PetscCall(DMPlexGetChart(dm, &pStart, &pEnd)); 512 if (!levels) { 513 /* Add owned points */ 514 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel)); 515 for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank)); 516 PetscFunctionReturn(0); 517 } 518 PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd)); 519 PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote)); 520 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank)); 521 /* Handle leaves: shared with the root point */ 522 for (l = 0; l < nleaves; ++l) { 523 PetscInt adjSize = PETSC_DETERMINE, a; 524 525 PetscCall(DMPlexGetAdjacency(dm, local ? 
local[l] : l, &adjSize, &adj)); 526 for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank)); 527 } 528 PetscCall(ISGetIndices(rootrank, &rrank)); 529 PetscCall(ISGetIndices(leafrank, &nrank)); 530 /* Handle roots */ 531 for (p = pStart; p < pEnd; ++p) { 532 PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a; 533 534 if ((p >= sStart) && (p < sEnd)) { 535 /* Some leaves share a root with other leaves on different processes */ 536 PetscCall(PetscSectionGetDof(leafSection, p, &neighbors)); 537 if (neighbors) { 538 PetscCall(PetscSectionGetOffset(leafSection, p, &noff)); 539 PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj)); 540 for (n = 0; n < neighbors; ++n) { 541 const PetscInt remoteRank = nrank[noff+n]; 542 543 if (remoteRank == rank) continue; 544 for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank)); 545 } 546 } 547 } 548 /* Roots are shared with leaves */ 549 PetscCall(PetscSectionGetDof(rootSection, p, &neighbors)); 550 if (!neighbors) continue; 551 PetscCall(PetscSectionGetOffset(rootSection, p, &noff)); 552 PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj)); 553 for (n = 0; n < neighbors; ++n) { 554 const PetscInt remoteRank = rrank[noff+n]; 555 556 if (remoteRank == rank) continue; 557 for (a = 0; a < adjSize; ++a) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank)); 558 } 559 } 560 PetscCall(PetscFree(adj)); 561 PetscCall(ISRestoreIndices(rootrank, &rrank)); 562 PetscCall(ISRestoreIndices(leafrank, &nrank)); 563 /* Add additional overlap levels */ 564 for (l = 1; l < levels; l++) { 565 /* Propagate point donations over SF to capture remote connections */ 566 PetscCall(DMPlexPartitionLabelPropagate(dm, ovAdjByRank)); 567 /* Add next level of point donations to the label */ 568 PetscCall(DMPlexPartitionLabelAdjacency(dm, ovAdjByRank)); 569 } 570 /* We require the closure in the overlap */ 571 PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank)); 572 
PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg)); 573 if (flg) { 574 PetscViewer viewer; 575 PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer)); 576 PetscCall(DMLabelView(ovAdjByRank, viewer)); 577 } 578 /* Invert sender to receiver label */ 579 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel)); 580 PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel)); 581 /* Add owned points, except for shared local points */ 582 for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank)); 583 for (l = 0; l < nleaves; ++l) { 584 PetscCall(DMLabelClearValue(*ovLabel, local[l], rank)); 585 PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank)); 586 } 587 /* Clean up */ 588 PetscCall(DMLabelDestroy(&ovAdjByRank)); 589 PetscFunctionReturn(0); 590 } 591 592 static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank) 593 { 594 PetscInt neighbors, el; 595 596 PetscFunctionBegin; 597 PetscCall(PetscSectionGetDof(section, p, &neighbors)); 598 if (neighbors) { 599 PetscInt *adj = NULL; 600 PetscInt adjSize = PETSC_DETERMINE, noff, n, a; 601 PetscMPIInt rank; 602 603 PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank)); 604 PetscCall(PetscSectionGetOffset(section, p, &noff)); 605 PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj)); 606 for (n = 0; n < neighbors; ++n) { 607 const PetscInt remoteRank = ranks[noff+n]; 608 609 if (remoteRank == rank) continue; 610 for (a = 0; a < adjSize; ++a) { 611 PetscBool insert = PETSC_TRUE; 612 613 for (el = 0; el < numExLabels; ++el) { 614 PetscInt exVal; 615 PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal)); 616 if (exVal == exValue[el]) {insert = PETSC_FALSE; break;} 617 } 618 if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], 
remoteRank)); 619 } 620 } 621 } 622 PetscFunctionReturn(0); 623 } 624 625 /*@C 626 DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes 627 628 Collective on dm 629 630 Input Parameters: 631 + dm - The DM 632 . numLabels - The number of labels to draw candidate points from 633 . label - An array of labels containing candidate points 634 . value - An array of label values marking the candidate points 635 . numExLabels - The number of labels to use for exclusion 636 . exLabel - An array of labels indicating points to be excluded, or NULL 637 . exValue - An array of label values to be excluded, or NULL 638 . rootSection - The number of leaves for a given root point 639 . rootrank - The rank of each edge into the root point 640 . leafSection - The number of processes sharing a given leaf point 641 - leafrank - The rank of each process sharing a leaf point 642 643 Output Parameter: 644 . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 645 646 Note: 647 The candidate points are only added to the overlap if they are adjacent to a shared point 648 649 Level: developer 650 651 .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()` 652 @*/ 653 PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], 654 PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 655 { 656 MPI_Comm comm; 657 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 658 PetscSF sfPoint; 659 const PetscSFNode *remote; 660 const PetscInt *local; 661 const PetscInt *nrank, *rrank; 662 PetscInt *adj = NULL; 663 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l, el; 664 PetscMPIInt rank, size; 665 PetscBool 
flg; 666 667 PetscFunctionBegin; 668 *ovLabel = NULL; 669 PetscCall(PetscObjectGetComm((PetscObject) dm, &comm)); 670 PetscCallMPI(MPI_Comm_size(comm, &size)); 671 PetscCallMPI(MPI_Comm_rank(comm, &rank)); 672 if (size == 1) PetscFunctionReturn(0); 673 PetscCall(DMGetPointSF(dm, &sfPoint)); 674 PetscCall(DMPlexGetChart(dm, &pStart, &pEnd)); 675 PetscCall(PetscSectionGetChart(leafSection, &sStart, &sEnd)); 676 PetscCall(PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote)); 677 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank)); 678 PetscCall(ISGetIndices(rootrank, &rrank)); 679 PetscCall(ISGetIndices(leafrank, &nrank)); 680 for (l = 0; l < numLabels; ++l) { 681 IS valIS; 682 const PetscInt *points; 683 PetscInt n; 684 685 PetscCall(DMLabelGetStratumIS(label[l], value[l], &valIS)); 686 if (!valIS) continue; 687 PetscCall(ISGetIndices(valIS, &points)); 688 PetscCall(ISGetLocalSize(valIS, &n)); 689 for (PetscInt i = 0; i < n; ++i) { 690 const PetscInt p = points[i]; 691 692 if ((p >= sStart) && (p < sEnd)) { 693 PetscInt loc, adjSize = PETSC_DETERMINE; 694 695 /* Handle leaves: shared with the root point */ 696 if (local) PetscCall(PetscFindInt(p, nleaves, local, &loc)); 697 else loc = (p >= 0 && p < nleaves) ? 
p : -1; 698 if (loc >= 0) { 699 const PetscInt remoteRank = remote[loc].rank; 700 701 PetscCall(DMPlexGetAdjacency(dm, p, &adjSize, &adj)); 702 for (PetscInt a = 0; a < adjSize; ++a) { 703 PetscBool insert = PETSC_TRUE; 704 705 for (el = 0; el < numExLabels; ++el) { 706 PetscInt exVal; 707 PetscCall(DMLabelGetValue(exLabel[el], adj[a], &exVal)); 708 if (exVal == exValue[el]) {insert = PETSC_FALSE; break;} 709 } 710 if (insert) PetscCall(DMLabelSetValue(ovAdjByRank, adj[a], remoteRank)); 711 } 712 } 713 /* Some leaves share a root with other leaves on different processes */ 714 HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank); 715 } 716 /* Roots are shared with leaves */ 717 HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank); 718 } 719 PetscCall(ISRestoreIndices(valIS, &points)); 720 PetscCall(ISDestroy(&valIS)); 721 } 722 PetscCall(PetscFree(adj)); 723 PetscCall(ISRestoreIndices(rootrank, &rrank)); 724 PetscCall(ISRestoreIndices(leafrank, &nrank)); 725 /* We require the closure in the overlap */ 726 PetscCall(DMPlexPartitionLabelClosure(dm, ovAdjByRank)); 727 PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg)); 728 if (flg) { 729 PetscViewer viewer; 730 PetscCall(PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer)); 731 PetscCall(DMLabelView(ovAdjByRank, viewer)); 732 } 733 /* Invert sender to receiver label */ 734 PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel)); 735 PetscCall(DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel)); 736 /* Add owned points, except for shared local points */ 737 for (p = pStart; p < pEnd; ++p) PetscCall(DMLabelSetValue(*ovLabel, p, rank)); 738 for (l = 0; l < nleaves; ++l) { 739 PetscCall(DMLabelClearValue(*ovLabel, local[l], rank)); 740 PetscCall(DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank)); 741 } 742 /* Clean up */ 743 
  PetscCall(DMLabelDestroy(&ovAdjByRank));
  PetscFunctionReturn(0);
}

/*@C
  DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

  Collective on dm

  Input Parameters:
+ dm - The DM
- overlapSF - The SF mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - An SF that maps original points in old locations to points in new locations

  Level: developer

.seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMGetDimension(dm, &dim));

  /* Before building the migration SF we need to know the new stratum offsets */
  PetscCall(PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  /* Learn the depth of every incoming overlap point from its owner */
  PetscCall(PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths,MPI_REPLACE));

  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx));
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  /* Shifts follow the Plex numbering convention: cells, vertices, then
     intermediate strata -- NOTE(review): verify ordering against
     DMPlexGetDepthStratum() conventions if this is ever modified */
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  newLeaves = pEnd - pStart + nleaves;
  PetscCall(PetscMalloc1(newLeaves, &ilocal));
  PetscCall(PetscMalloc1(newLeaves, &iremote));
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  PetscCall(DMGetPointSF(dm, &pointSF));
  PetscCall(PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote));
  for (d=0; d<dim+1; d++) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points: depthIdx[d] already points past the
     shifted local points of stratum d, so appended points stay stratified */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  PetscCall(PetscFree2(pointDepths,remoteDepths));

  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF"));
  PetscCall(PetscSFSetFromOptions(*migrationSF));
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* ilocal/iremote ownership transfers to the SF */
  PetscCall(PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));

  PetscCall(PetscFree3(depthRecv, depthShift, depthIdx));
  PetscFunctionReturn(0);
}

/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

  Input Parameters:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Note:
  This lexicographically sorts by (depth, cellType)

  Level: developer

.seealso: `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
  PetscSFNode       *pointDepths, *remoteDepths;
  PetscInt          *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscInt          *ctRecv, *ctShift, *ctIdx;
  const PetscSFNode *iremote;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCall(PetscObjectGetComm((PetscObject) dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(DMPlexGetDepth(dm, &ldepth));
  PetscCall(DMGetDimension(dm, &dim));
  /* Every rank with a computed depth must agree with the global maximum */
  PetscCall(MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm));
PetscCheck(!(ldepth >= 0) || !(depth != ldepth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, ldepth, depth);
  PetscCall(PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0));

  /* Before building the migration SF we need to know the new stratum offsets:
     send each point's (depth, cellType) pair to the receivers */
  PetscCall(PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote));
  PetscCall(PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths));
  for (d = 0; d < depth+1; ++d) {
    PetscCall(DMPlexGetDepthStratum(dm, d, &pStart, &pEnd));
    for (p = pStart; p < pEnd; ++p) {
      DMPolytopeType ct;

      PetscCall(DMPlexGetCellType(dm, p, &ct));
      /* Pack depth in .index and cell type in .rank so one MPIU_2INT bcast moves both */
      pointDepths[p].index = d;
      pointDepths[p].rank  = ct;
    }
  }
  /* -1 marks "no cell type": such leaves fall back to depth-based placement below */
  for (p = 0; p < nleaves; ++p) {remoteDepths[p].index = -1; remoteDepths[p].rank = -1;}
  PetscCall(PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths,MPI_REPLACE));
  /* Count received points in each stratum and compute the internal strata shift */
  PetscCall(PetscCalloc6(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx));
  for (p = 0; p < nleaves; ++p) {
    if (remoteDepths[p].rank < 0) {
      ++depthRecv[remoteDepths[p].index];
    } else {
      ++ctRecv[remoteDepths[p].rank];
    }
  }
  {
    PetscInt depths[4], dims[4], shift = 0, i, c;

    /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
       Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
    */
    depths[0] = depth; depths[1] = 0; depths[2] = depth-1; depths[3] = 1;
    dims[0] = dim; dims[1] = 0; dims[2] = dim-1; dims[3] = 1;
    for (i = 0; i <= depth; ++i) {
      const PetscInt dep = depths[i];
      const PetscInt dim = dims[i];

      /* First lay out points with a known cell type of this dimension, then the
         depth-only points of the same stratum */
      for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
        if (DMPolytopeTypeGetDim((DMPolytopeType) c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
      depthShift[dep] = shift;
      shift += depthRecv[dep];
    }
    /* Remaining cell types (dimension out of range, non-ghost) go at the end */
    for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
      const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType) c);

      if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
        ctShift[c] = shift;
        shift += ctRecv[c];
      }
    }
  }
  /* Derive a new local permutation based on stratified indices */
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  for (p = 0; p < nleaves; ++p) {
    const PetscInt       dep = remoteDepths[p].index;
    const DMPolytopeType ct  = (DMPolytopeType) remoteDepths[p].rank;

    if ((PetscInt) ct < 0) {
      ilocal[p] = depthShift[dep] + depthIdx[dep];
      ++depthIdx[dep];
    } else {
      ilocal[p] = ctShift[ct] + ctIdx[ct];
      ++ctIdx[ct];
    }
  }
  PetscCall(PetscSFCreate(comm, migrationSF));
  PetscCall(PetscObjectSetName((PetscObject) *migrationSF, "Migration SF"));
  PetscCall(PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode*)iremote, PETSC_COPY_VALUES));
  PetscCall(PetscFree2(pointDepths,remoteDepths));
  PetscCall(PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx));
  PetscCall(PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0));
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on dm

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
.
originalSection - The PetscSection for existing data layout 982 - originalVec - The existing data in a local vector 983 984 Output Parameters: 985 + newSection - The PetscSF describing the new data layout 986 - newVec - The new data in a local vector 987 988 Level: developer 989 990 .seealso: `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()` 991 @*/ 992 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec) 993 { 994 PetscSF fieldSF; 995 PetscInt *remoteOffsets, fieldSize; 996 PetscScalar *originalValues, *newValues; 997 998 PetscFunctionBegin; 999 PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0)); 1000 PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection)); 1001 1002 PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize)); 1003 PetscCall(VecSetSizes(newVec, fieldSize, PETSC_DETERMINE)); 1004 PetscCall(VecSetType(newVec,dm->vectype)); 1005 1006 PetscCall(VecGetArray(originalVec, &originalValues)); 1007 PetscCall(VecGetArray(newVec, &newValues)); 1008 PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF)); 1009 PetscCall(PetscFree(remoteOffsets)); 1010 PetscCall(PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE)); 1011 PetscCall(PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues,MPI_REPLACE)); 1012 PetscCall(PetscSFDestroy(&fieldSF)); 1013 PetscCall(VecRestoreArray(newVec, &newValues)); 1014 PetscCall(VecRestoreArray(originalVec, &originalValues)); 1015 PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0)); 1016 PetscFunctionReturn(0); 1017 } 1018 1019 /*@ 1020 DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 1021 1022 Collective on dm 1023 1024 Input Parameters: 1025 + dm - The DMPlex object 1026 . 
pointSF - The PetscSF describing the communication pattern 1027 . originalSection - The PetscSection for existing data layout 1028 - originalIS - The existing data 1029 1030 Output Parameters: 1031 + newSection - The PetscSF describing the new data layout 1032 - newIS - The new data 1033 1034 Level: developer 1035 1036 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()` 1037 @*/ 1038 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS) 1039 { 1040 PetscSF fieldSF; 1041 PetscInt *newValues, *remoteOffsets, fieldSize; 1042 const PetscInt *originalValues; 1043 1044 PetscFunctionBegin; 1045 PetscCall(PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0)); 1046 PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection)); 1047 1048 PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize)); 1049 PetscCall(PetscMalloc1(fieldSize, &newValues)); 1050 1051 PetscCall(ISGetIndices(originalIS, &originalValues)); 1052 PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF)); 1053 PetscCall(PetscFree(remoteOffsets)); 1054 PetscCall(PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE)); 1055 PetscCall(PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues,MPI_REPLACE)); 1056 PetscCall(PetscSFDestroy(&fieldSF)); 1057 PetscCall(ISRestoreIndices(originalIS, &originalValues)); 1058 PetscCall(ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS)); 1059 PetscCall(PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0)); 1060 PetscFunctionReturn(0); 1061 } 1062 1063 /*@ 1064 DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 1065 1066 Collective on dm 1067 1068 Input Parameters: 1069 + dm - The DMPlex object 1070 . 
pointSF - The PetscSF describing the communication pattern 1071 . originalSection - The PetscSection for existing data layout 1072 . datatype - The type of data 1073 - originalData - The existing data 1074 1075 Output Parameters: 1076 + newSection - The PetscSection describing the new data layout 1077 - newData - The new data 1078 1079 Level: developer 1080 1081 .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()` 1082 @*/ 1083 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData) 1084 { 1085 PetscSF fieldSF; 1086 PetscInt *remoteOffsets, fieldSize; 1087 PetscMPIInt dataSize; 1088 1089 PetscFunctionBegin; 1090 PetscCall(PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0)); 1091 PetscCall(PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection)); 1092 1093 PetscCall(PetscSectionGetStorageSize(newSection, &fieldSize)); 1094 PetscCallMPI(MPI_Type_size(datatype, &dataSize)); 1095 PetscCall(PetscMalloc(fieldSize * dataSize, newData)); 1096 1097 PetscCall(PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF)); 1098 PetscCall(PetscFree(remoteOffsets)); 1099 PetscCall(PetscSFBcastBegin(fieldSF, datatype, originalData, *newData,MPI_REPLACE)); 1100 PetscCall(PetscSFBcastEnd(fieldSF, datatype, originalData, *newData,MPI_REPLACE)); 1101 PetscCall(PetscSFDestroy(&fieldSF)); 1102 PetscCall(PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0)); 1103 PetscFunctionReturn(0); 1104 } 1105 1106 static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel) 1107 { 1108 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1109 MPI_Comm comm; 1110 PetscSF coneSF; 1111 PetscSection originalConeSection, newConeSection; 1112 PetscInt *remoteOffsets, *cones, *globCones, *newCones, newConesSize; 1113 PetscBool flg; 1114 1115 
PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0));
  /* Distribute cone section */
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMPlexGetConeSection(dm, &originalConeSection));
  PetscCall(DMPlexGetConeSection(dmParallel, &newConeSection));
  PetscCall(PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection));
  PetscCall(DMSetUp(dmParallel));
  /* Communicate and renumber cones: convert to global numbering if an original
     mapping is given, broadcast, then map back to the new local numbering */
  PetscCall(PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF));
  PetscCall(PetscFree(remoteOffsets));
  PetscCall(DMPlexGetCones(dm, &cones));
  if (original) {
    PetscInt numCones;

    PetscCall(PetscSectionGetStorageSize(originalConeSection,&numCones));
    PetscCall(PetscMalloc1(numCones,&globCones));
    PetscCall(ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones));
  } else {
    /* Cones are already in the numbering understood by "renumbering" */
    globCones = cones;
  }
  PetscCall(DMPlexGetCones(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones,MPI_REPLACE));
  if (original) PetscCall(PetscFree(globCones));
  PetscCall(PetscSectionGetStorageSize(newConeSection, &newConesSize));
  /* IS_GTOLM_MASK leaves unfound points as -1, caught by the debug check below */
  PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones));
  if (PetscDefined(USE_DEBUG)) {
    PetscInt p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank,p));}
    }
    PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg));
  if (flg) {
    PetscCall(PetscPrintf(comm, "Serial Cone Section:\n"));
    PetscCall(PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscPrintf(comm, "Parallel Cone Section:\n"));
    PetscCall(PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(coneSF, NULL));
  }
  /* Cone orientations need no renumbering, reuse the same SF */
  PetscCall(DMPlexGetConeOrientations(dm, &cones));
  PetscCall(DMPlexGetConeOrientations(dmParallel, &newCones));
  PetscCall(PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones,MPI_REPLACE));
  PetscCall(PetscSFDestroy(&coneSF));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0));
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    PetscCall(PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd));
    PetscCall(PetscSectionSetChart(pmesh->supportSection, pStart, pEnd));
  }
  PetscCall(DMPlexSymmetrize(dmParallel));
  PetscCall(DMPlexStratify(dmParallel));
  /* Carry the adjacency configuration over to the parallel DM */
  {
    PetscBool useCone, useClosure, useAnchors;

    PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure));
    PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure));
    PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors));
    PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors));
  }
  PetscFunctionReturn(0);
}

/* Distribute local coordinates and the coordinate layout to the parallel DM,
   and copy over periodicity and the coordinate discretization */
static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm              comm;
  DM                    cdm, cdmParallel;
  PetscSection          originalCoordSection, newCoordSection;
  Vec                   originalCoordinates, newCoordinates;
  PetscInt              bs;
  PetscBool             isper;
  const char           *name;
  const PetscReal      *maxCell, *L;
  const DMBoundaryType *bd;

  PetscFunctionBegin;
PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCall(DMGetCoordinateSection(dm, &originalCoordSection));
  PetscCall(DMGetCoordinateSection(dmParallel, &newCoordSection));
  PetscCall(DMGetCoordinatesLocal(dm, &originalCoordinates));
  if (originalCoordinates) {
    /* Coordinates are a local (sequential) vector on each rank */
    PetscCall(VecCreate(PETSC_COMM_SELF, &newCoordinates));
    PetscCall(PetscObjectGetName((PetscObject) originalCoordinates, &name));
    PetscCall(PetscObjectSetName((PetscObject) newCoordinates, name));

    PetscCall(DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates));
    PetscCall(DMSetCoordinatesLocal(dmParallel, newCoordinates));
    PetscCall(VecGetBlockSize(originalCoordinates, &bs));
    PetscCall(VecSetBlockSize(newCoordinates, bs));
    PetscCall(VecDestroy(&newCoordinates));
  }
  /* Periodicity and the coordinate discretization travel with the mesh */
  PetscCall(DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd));
  PetscCall(DMSetPeriodicity(dmParallel, isper, maxCell, L, bd));
  PetscCall(DMGetCoordinateDM(dm, &cdm));
  PetscCall(DMGetCoordinateDM(dmParallel, &cdmParallel));
  PetscCall(DMCopyDisc(cdm, cdmParallel));
  PetscFunctionReturn(0);
}

/* Distribute every label of dm onto dmParallel via the migration SF.
   The "depth" label is skipped unless the user has modified it. */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  PetscCall(PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));

  /* If the user has changed the depth label, communicate it instead of recreating it */
  PetscCall(DMPlexGetDepth(dm, &depth));
  PetscCall(DMPlexGetDepthLabel(dm, &depthLabel));
  if (depthLabel) PetscCall(PetscObjectStateGet((PetscObject) depthLabel, &depthState));
  /* depthState recorded in mesh differs from the label's current state iff it was modified */
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  PetscCall(MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm));
  if (sendDepth) {
    /* Remove the automatically-created depth label so the distributed one replaces it */
    PetscCall(DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel));
    PetscCall(DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE));
  }
  /* Everyone must have either the same number of labels, or none */
  PetscCall(DMGetNumLabels(dm, &numLocalLabels));
  numLabels = numLocalLabels;
  PetscCallMPI(MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm));
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      PetscCall(DMGetLabelByNum(dm, l, &label));
      /* Skip "depth" because it is recreated */
      PetscCall(PetscObjectGetName((PetscObject) label, &name));
      PetscCall(PetscStrcmp(name, "depth", &isDepth));
    }
    /* Ranks without labels learn isDepth from rank 0 */
    PetscCallMPI(MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm));
    if (isDepth && !sendDepth) continue;
    PetscCall(DMLabelDistribute(label, migrationSF, &labelNew));
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      PetscCall(MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm));
      PetscCheck(!(depth >= 0) || !(gdepth != depth),PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %" PetscInt_FMT " != %" PetscInt_FMT, depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        PetscCall(DMLabelHasStratum(labelNew, d, &has));
        if (!has) PetscCall(DMLabelAddStratum(labelNew, d));
      }
    }
    PetscCall(DMAddLabel(dmParallel, labelNew));
    /* Put the output flag in the new label */
    if (hasLabels) PetscCall(DMGetLabelOutput(dm, name, &lisOutput));
    PetscCall(MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm));
    PetscCall(PetscObjectGetName((PetscObject) labelNew, &name));
    PetscCall(DMSetLabelOutput(dmParallel, name, isOutput));
    PetscCall(DMLabelDestroy(&labelNew));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0));
  PetscFunctionReturn(0);
}

/* Distribute the reference tree and parent/child constraint relations onto dmParallel */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex     *mesh  = (DM_Plex*) dm->data;
  DM_Plex     *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm     comm;
  DM           refTree;
  PetscSection origParentSection, newParentSection;
  PetscInt    *origParents, *origChildIDs;
  PetscBool    flg;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));

  /* Set up tree */
  PetscCall(DMPlexGetReferenceTree(dm,&refTree));
  PetscCall(DMPlexSetReferenceTree(dmParallel,refTree));
  PetscCall(DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL));
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection));
    PetscCall(PetscSectionSetChart(newParentSection,pStart,pEnd));
    PetscCall(PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents,
newParentSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF));
    PetscCall(PetscFree(remoteOffsetsParents));
    PetscCall(PetscSectionGetStorageSize(newParentSection,&newParentSize));
    PetscCall(PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs));
    /* Translate parents to global numbering before communicating, as in DMPlexDistributeCones */
    if (original) {
      PetscInt numParents;

      PetscCall(PetscSectionGetStorageSize(origParentSection,&numParents));
      PetscCall(PetscMalloc1(numParents,&globParents));
      PetscCall(ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents));
    }
    else {
      globParents = origParents;
    }
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents,MPI_REPLACE));
    if (original) PetscCall(PetscFree(globParents));
    PetscCall(PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs,MPI_REPLACE));
    /* Map communicated global parents back to the new local numbering; -1 marks failures */
    PetscCall(ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents));
    if (PetscDefined(USE_DEBUG)) {
      PetscInt p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; PetscCall(PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p));}
      }
      PetscCheck(valid,PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
    PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg));
    if (flg) {
      PetscCall(PetscPrintf(comm, "Serial Parent Section: \n"));
      PetscCall(PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscPrintf(comm, "Parallel Parent Section: \n"));
      PetscCall(PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm)));
      PetscCall(PetscSFView(parentSF, NULL));
    }
    PetscCall(DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs));
    PetscCall(PetscSectionDestroy(&newParentSection));
    PetscCall(PetscFree2(newParents,newChildIDs));
    PetscCall(PetscSFDestroy(&parentSF));
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

/* Build the point SF of the parallel mesh from the migration SF, deciding
   ownership of shared points by a MAXLOC reduction over ranks */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt rank, size;
  MPI_Comm    comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0));
  PetscCall(PetscObjectGetComm((PetscObject)dm, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    PetscCall(DMPlexGetChart(dmParallel, &pStart, &pEnd));
    PetscCall(PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL));
    PetscCall(PetscMalloc2(numRoots,&rowners,numLeaves,&lowners));
    /* Start with unclaimed roots (-1) and push that state to the leaves */
    for (p=0; p<numRoots; p++) {
      rowners[p].rank = -1;
      rowners[p].index = -1;
    }
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank = rank;
        lowners[p].index = leaves ?
leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank = -3;
      rowners[p].index = -3;
    }
    /* The highest bidding rank wins each root, then the result is pushed back to the leaves */
    PetscCall(PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC));
    PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners,MPI_REPLACE));
    /* Leaves whose owner is another rank become ghost points */
    for (p = 0; p < numLeaves; ++p) {
      PetscCheck(lowners[p].rank >= 0 && lowners[p].index >= 0,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    PetscCall(PetscMalloc1(numGhostPoints, &ghostPoints));
    PetscCall(PetscMalloc1(numGhostPoints, &remotePoints));
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp] = leaves ?
leaves[p] : p; 1426 remotePoints[gp].rank = lowners[p].rank; 1427 remotePoints[gp].index = lowners[p].index; 1428 ++gp; 1429 } 1430 } 1431 PetscCall(PetscFree2(rowners,lowners)); 1432 PetscCall(PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER)); 1433 PetscCall(PetscSFSetFromOptions((dmParallel)->sf)); 1434 } 1435 { 1436 PetscBool useCone, useClosure, useAnchors; 1437 1438 PetscCall(DMGetBasicAdjacency(dm, &useCone, &useClosure)); 1439 PetscCall(DMSetBasicAdjacency(dmParallel, useCone, useClosure)); 1440 PetscCall(DMPlexGetAdjacencyUseAnchors(dm, &useAnchors)); 1441 PetscCall(DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors)); 1442 } 1443 PetscCall(PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0)); 1444 PetscFunctionReturn(0); 1445 } 1446 1447 /*@ 1448 DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition? 1449 1450 Input Parameters: 1451 + dm - The DMPlex object 1452 - flg - Balance the partition? 1453 1454 Level: intermediate 1455 1456 .seealso: `DMPlexDistribute()`, `DMPlexGetPartitionBalance()` 1457 @*/ 1458 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg) 1459 { 1460 DM_Plex *mesh = (DM_Plex *)dm->data; 1461 1462 PetscFunctionBegin; 1463 mesh->partitionBalance = flg; 1464 PetscFunctionReturn(0); 1465 } 1466 1467 /*@ 1468 DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition? 1469 1470 Input Parameter: 1471 . dm - The DMPlex object 1472 1473 Output Parameter: 1474 . flg - Balance the partition? 
1475 1476 Level: intermediate 1477 1478 .seealso: `DMPlexDistribute()`, `DMPlexSetPartitionBalance()` 1479 @*/ 1480 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg) 1481 { 1482 DM_Plex *mesh = (DM_Plex *)dm->data; 1483 1484 PetscFunctionBegin; 1485 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1486 PetscValidBoolPointer(flg, 2); 1487 *flg = mesh->partitionBalance; 1488 PetscFunctionReturn(0); 1489 } 1490 1491 typedef struct { 1492 PetscInt vote, rank, index; 1493 } Petsc3Int; 1494 1495 /* MaxLoc, but carry a third piece of information around */ 1496 static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype) 1497 { 1498 Petsc3Int *a = (Petsc3Int *)inout_; 1499 Petsc3Int *b = (Petsc3Int *)in_; 1500 PetscInt i, len = *len_; 1501 for (i = 0; i < len; i++) { 1502 if (a[i].vote < b[i].vote) { 1503 a[i].vote = b[i].vote; 1504 a[i].rank = b[i].rank; 1505 a[i].index = b[i].index; 1506 } else if (a[i].vote <= b[i].vote) { 1507 if (a[i].rank >= b[i].rank) { 1508 a[i].rank = b[i].rank; 1509 a[i].index = b[i].index; 1510 } 1511 } 1512 } 1513 } 1514 1515 /*@C 1516 DMPlexCreatePointSF - Build a point SF from an SF describing a point migration 1517 1518 Input Parameters: 1519 + dm - The source DMPlex object 1520 . migrationSF - The star forest that describes the parallel point remapping 1521 - ownership - Flag causing a vote to determine point ownership 1522 1523 Output Parameter: 1524 . pointSF - The star forest describing the point overlap in the remapped DM 1525 1526 Notes: 1527 Output pointSF is guaranteed to have the array of local indices (leaves) sorted. 

  Level: developer

.seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank, size;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  Vec                shifts;
  const PetscInt     numShifts = 13759;  /* size of the random shift table used for balancing */
  const PetscScalar *shift = NULL;
  const PetscBool    shiftDebug = PETSC_FALSE;
  PetscBool          balance;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size));
  PetscCall(PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0));

  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots));
  PetscCall(PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes));
  if (ownership) {
    MPI_Op op;
    MPI_Datatype datatype;
    Petsc3Int *rootVote = NULL, *leafVote = NULL;
    /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
    if (balance) {
      PetscRandom r;

      PetscCall(PetscRandomCreate(PETSC_COMM_SELF, &r));
      PetscCall(PetscRandomSetInterval(r, 0, 2467*size));
      PetscCall(VecCreate(PETSC_COMM_SELF, &shifts));
      PetscCall(VecSetSizes(shifts, numShifts, numShifts));
      PetscCall(VecSetType(shifts, VECSTANDARD));
      PetscCall(VecSetRandom(shifts, r));
      PetscCall(PetscRandomDestroy(&r));
      PetscCall(VecGetArrayRead(shifts, &shift));
    }

    PetscCall(PetscMalloc1(nroots, &rootVote));
    PetscCall(PetscMalloc1(nleaves, &leafVote));
    /* Point ownership vote: Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      if (shiftDebug) {
        PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size));
      }
      /* Either put in a bid or we know we own it */
      leafVote[p].vote = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
      leafVote[p].rank = rank;
      leafVote[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootVote[p].vote = -3;
      rootVote[p].rank = -3;
      rootVote[p].index = -3;
    }
    /* Reduce the (vote, rank, index) triples with the custom MaxLocCarry op */
    PetscCallMPI(MPI_Type_contiguous(3, MPIU_INT, &datatype));
    PetscCallMPI(MPI_Type_commit(&datatype));
    PetscCallMPI(MPI_Op_create(&MaxLocCarry, 1, &op));
    PetscCall(PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op));
    PetscCall(PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op));
    PetscCallMPI(MPI_Op_free(&op));
    PetscCallMPI(MPI_Type_free(&datatype));
    for (p = 0; p < nroots; p++) {
      rootNodes[p].rank = rootVote[p].rank;
      rootNodes[p].index = rootVote[p].index;
    }
    PetscCall(PetscFree(leafVote));
    PetscCall(PetscFree(rootVote));
  } else {
    /* No vote: each root is owned by this rank and records the local id of its leaf */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank = rank;
    }
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  PetscCall(PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes,MPI_REPLACE));

  /* Points owned by another rank become the leaves of the point SF */
  for (npointLeaves = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) npointLeaves++;
  }
  PetscCall(PetscMalloc1(npointLeaves, &pointLocal));
  PetscCall(PetscMalloc1(npointLeaves, &pointRemote));
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
      pointLocal[idx] = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  if (shift) {
    PetscCall(VecRestoreArrayRead(shifts, &shift));
    PetscCall(VecDestroy(&shifts));
  }
  if (shiftDebug) PetscCall(PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT));
  PetscCall(PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF));
  PetscCall(PetscSFSetFromOptions(*pointSF));
  /* pointLocal/pointRemote ownership passes to the SF */
  PetscCall(PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER));
  PetscCall(PetscFree2(rootNodes, leafNodes));
  PetscCall(PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0));
  if (PetscDefined(USE_DEBUG)) PetscCall(DMPlexCheckPointSF(dm, *pointSF));
  PetscFunctionReturn(0);
}

/*@C
  DMPlexMigrate - Migrates internal DM data over the supplied star forest

  Collective on dm

  Input Parameters:
+ dm - The source DMPlex object
- sf - The star forest communication context describing the migration pattern

  Output Parameter:
.
targetDM - The target DMPlex object 1658 1659 Level: intermediate 1660 1661 .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()` 1662 @*/ 1663 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) 1664 { 1665 MPI_Comm comm; 1666 PetscInt dim, cdim, nroots; 1667 PetscSF sfPoint; 1668 ISLocalToGlobalMapping ltogMigration; 1669 ISLocalToGlobalMapping ltogOriginal = NULL; 1670 1671 PetscFunctionBegin; 1672 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1673 PetscCall(PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0)); 1674 PetscCall(PetscObjectGetComm((PetscObject) dm, &comm)); 1675 PetscCall(DMGetDimension(dm, &dim)); 1676 PetscCall(DMSetDimension(targetDM, dim)); 1677 PetscCall(DMGetCoordinateDim(dm, &cdim)); 1678 PetscCall(DMSetCoordinateDim(targetDM, cdim)); 1679 1680 /* Check for a one-to-all distribution pattern */ 1681 PetscCall(DMGetPointSF(dm, &sfPoint)); 1682 PetscCall(PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL)); 1683 if (nroots >= 0) { 1684 IS isOriginal; 1685 PetscInt n, size, nleaves; 1686 PetscInt *numbering_orig, *numbering_new; 1687 1688 /* Get the original point numbering */ 1689 PetscCall(DMPlexCreatePointNumbering(dm, &isOriginal)); 1690 PetscCall(ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal)); 1691 PetscCall(ISLocalToGlobalMappingGetSize(ltogOriginal, &size)); 1692 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig)); 1693 /* Convert to positive global numbers */ 1694 for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);} 1695 /* Derive the new local-to-global mapping from the old one */ 1696 PetscCall(PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL)); 1697 PetscCall(PetscMalloc1(nleaves, &numbering_new)); 1698 PetscCall(PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE)); 1699 PetscCall(PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new,MPI_REPLACE)); 1700 PetscCall(ISLocalToGlobalMappingCreate(comm, 1, 
nleaves, numbering_new, PETSC_OWN_POINTER, <ogMigration)); 1701 PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig)); 1702 PetscCall(ISDestroy(&isOriginal)); 1703 } else { 1704 /* One-to-all distribution pattern: We can derive LToG from SF */ 1705 PetscCall(ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration)); 1706 } 1707 PetscCall(PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration")); 1708 PetscCall(ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view")); 1709 /* Migrate DM data to target DM */ 1710 PetscCall(DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM)); 1711 PetscCall(DMPlexDistributeLabels(dm, sf, targetDM)); 1712 PetscCall(DMPlexDistributeCoordinates(dm, sf, targetDM)); 1713 PetscCall(DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM)); 1714 PetscCall(ISLocalToGlobalMappingDestroy(<ogOriginal)); 1715 PetscCall(ISLocalToGlobalMappingDestroy(<ogMigration)); 1716 PetscCall(PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0)); 1717 PetscFunctionReturn(0); 1718 } 1719 1720 /*@C 1721 DMPlexDistribute - Distributes the mesh and any associated sections. 1722 1723 Collective on dm 1724 1725 Input Parameters: 1726 + dm - The original DMPlex object 1727 - overlap - The overlap of partitions, 0 is the default 1728 1729 Output Parameters: 1730 + sf - The PetscSF used for point distribution, or NULL if not needed 1731 - dmParallel - The distributed DMPlex object 1732 1733 Note: If the mesh was not distributed, the output dmParallel will be NULL. 1734 1735 The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function 1736 representation on the mesh. 

  Level: intermediate

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm         comm;
  PetscPartitioner partitioner;
  IS               cellPart;
  PetscSection     cellPartSection;
  DM               dmCoord;
  DMLabel          lblPartition, lblMigration;
  PetscSF          sfMigration, sfStratified, sfPoint;
  PetscBool        flg, balance;
  PetscMPIInt      rank, size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf,3);
  PetscValidPointer(dmParallel,4);

  if (sf) *sf = NULL;
  *dmParallel = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  /* Nothing to distribute on a single process; dmParallel stays NULL by contract */
  if (size == 1) PetscFunctionReturn(0);

  PetscCall(PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0));
  /* Create cell partition */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &cellPartSection));
  PetscCall(DMPlexGetPartitioner(dm, &partitioner));
  PetscCall(PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart));
  PetscCall(PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0));
  {
    /* Convert partition to DMLabel: one stratum per target rank, containing the
       closure of the cells assigned to that rank */
    IS              is;
    PetscHSetI      ht;
    const PetscInt *points;
    PetscInt       *iranks;
    PetscInt        pStart, pEnd, proc, npoints, poff = 0, nranks;

    PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition));
    /* Preallocate strata */
    PetscCall(PetscHSetICreate(&ht));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (npoints) PetscCall(PetscHSetIAdd(ht, proc));
    }
    PetscCall(PetscHSetIGetSize(ht, &nranks));
    PetscCall(PetscMalloc1(nranks, &iranks));
    PetscCall(PetscHSetIGetElems(ht, &poff, iranks));
    PetscCall(PetscHSetIDestroy(&ht));
    PetscCall(DMLabelAddStrata(lblPartition, nranks, iranks));
    PetscCall(PetscFree(iranks));
    /* Inline DMPlexPartitionLabelClosure() */
    PetscCall(ISGetIndices(cellPart, &points));
    PetscCall(PetscSectionGetChart(cellPartSection, &pStart, &pEnd));
    for (proc = pStart; proc < pEnd; proc++) {
      PetscCall(PetscSectionGetDof(cellPartSection, proc, &npoints));
      if (!npoints) continue;
      PetscCall(PetscSectionGetOffset(cellPartSection, proc, &poff));
      PetscCall(DMPlexClosurePoints_Private(dm, npoints, points+poff, &is));
      PetscCall(DMLabelSetStratumIS(lblPartition, proc, is));
      PetscCall(ISDestroy(&is));
    }
    PetscCall(ISRestoreIndices(cellPart, &points));
  }
  PetscCall(PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0));

  /* Invert the partition label (destination rank -> points) into a migration
     label, build the migration SF from it, and stratify so DAG levels stay contiguous */
  PetscCall(DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration));
  PetscCall(DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration));
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified));
  PetscCall(PetscSFDestroy(&sfMigration));
  sfMigration = sfStratified;
  PetscCall(PetscSFSetUp(sfMigration));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg));
  if (flg) {
    PetscCall(DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm)));
    PetscCall(PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm)));
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  PetscCall(DMPlexCreate(comm, dmParallel));
  PetscCall(PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfMigration, *dmParallel));

  /* Build the point SF without overlap */
  PetscCall(DMPlexGetPartitionBalance(dm, &balance));
  PetscCall(DMPlexSetPartitionBalance(*dmParallel, balance));
  PetscCall(DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
  PetscCall(DMSetPointSF(*dmParallel, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmParallel, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  if (flg) PetscCall(PetscSFView(sfPoint, NULL));

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves, noldleaves, l;
    const PetscInt    *oldLeaves;
    PetscSFNode       *newRemote, *permRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM */
    PetscCall(DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap));
    PetscCall(DMDestroy(dmParallel));
    *dmParallel = dmOverlap;
    if (flg) {
      PetscCall(PetscPrintf(comm, "Overlap Migration SF:\n"));
      PetscCall(PetscSFView(sfOverlap, NULL));
    }

    /* Re-map the migration SF to establish the full migration pattern */
    PetscCall(PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote));
    PetscCall(PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL));
    PetscCall(PetscMalloc1(nleaves, &newRemote));
    /* oldRemote: original root point mapping to original leaf point
       newRemote: original leaf point mapping to overlapped leaf point */
    if (oldLeaves) {
      /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
      PetscCall(PetscMalloc1(noldleaves, &permRemote));
      for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
      oldRemote = permRemote;
    }
    PetscCall(PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote,MPI_REPLACE));
    /* When we permuted, oldRemote aliases the temporary permRemote and must be freed */
    if (oldLeaves) PetscCall(PetscFree(oldRemote));
    PetscCall(PetscSFCreate(comm, &sfOverlapPoint));
    PetscCall(PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER));
    PetscCall(PetscSFDestroy(&sfOverlap));
    PetscCall(PetscSFDestroy(&sfMigration));
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  PetscCall(DMLabelDestroy(&lblPartition));
  PetscCall(DMLabelDestroy(&lblMigration));
  PetscCall(PetscSectionDestroy(&cellPartSection));
  PetscCall(ISDestroy(&cellPart));
  /* Copy discretization */
  PetscCall(DMCopyDisc(dm, *dmParallel));
  /* Create sfNatural */
  if (dm->useNatural) {
    PetscSection section;

    PetscCall(DMGetLocalSection(dm, &section));
    PetscCall(DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural));
    PetscCall(DMSetUseNatural(*dmParallel, PETSC_TRUE));
  }
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel));
  /* Cleanup: caller takes ownership of sfMigration when sf was requested */
  if (sf) {*sf = sfMigration;}
  else PetscCall(PetscSFDestroy(&sfMigration));
  PetscCall(PetscSFDestroy(&sfPoint));
  PetscCall(PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0));
  PetscFunctionReturn(0);
}

/*@C
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

  Collective on dm

  Input Parameters:
+ dm  - The non-overlapping distributed DMPlex object
- overlap - The overlap of partitions (the same on all ranks)

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmOverlap - The overlapping distributed DMPlex object, or NULL

  Notes:
  If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency().
They should choose the combination appropriate for the function
  representation on the mesh.

  Options Database Keys:
+ -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
. -dm_plex_overlap_values <int1,int2,...> - List of overlap label values
. -dm_plex_overlap_exclude_label <name> - Label used to exclude points from overlap
- -dm_plex_overlap_exclude_value <int> - Label value used to exclude points from overlap

  Level: advanced

.seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  DM_Plex      *mesh = (DM_Plex *) dm->data;
  MPI_Comm      comm;
  PetscMPIInt   size, rank;
  PetscSection  rootSection, leafSection;
  IS            rootrank, leafrank;
  DM            dmCoord;
  DMLabel       lblOverlap;
  PetscSF       sfOverlap, sfStratified, sfPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  if (sf) *sf = NULL;
  *dmOverlap = NULL;
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  /* A serial mesh has no overlap to build; dmOverlap stays NULL by contract */
  if (size == 1) PetscFunctionReturn(0);
  {
    // We need to get options for the _already_distributed mesh, so it must be done here
    PetscInt overlap;  /* NOTE(review): intentionally shadows the 'overlap' parameter inside this scope */
    const char *prefix;
    char oldPrefix[PETSC_MAX_PATH_LEN];

    /* Temporarily append "dist_" to the prefix so overlap options for the
       distributed mesh are picked up, then restore the old prefix */
    oldPrefix[0] = '\0';
    PetscCall(PetscObjectGetOptionsPrefix((PetscObject) dm, &prefix));
    PetscCall(PetscStrcpy(oldPrefix, prefix));
    PetscCall(PetscObjectAppendOptionsPrefix((PetscObject) dm, "dist_"));
    PetscCall(DMPlexGetOverlap(dm, &overlap));
    PetscObjectOptionsBegin((PetscObject) dm);
    PetscCall(DMSetFromOptions_Overlap_Plex(PetscOptionsObject, dm, &overlap));
    PetscOptionsEnd();
    PetscCall(PetscObjectSetOptionsPrefix((PetscObject) dm, oldPrefix[0] == '\0' ? NULL : oldPrefix));
  }
  PetscCall(PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  /* Compute point overlap with neighbouring processes on the distributed DM */
  PetscCall(PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0));
  PetscCall(PetscSectionCreate(comm, &rootSection));
  PetscCall(PetscSectionCreate(comm, &leafSection));
  PetscCall(DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank));
  /* Either build the overlap from user-specified labels, or from the requested overlap width */
  if (mesh->numOvLabels) PetscCall(DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  else PetscCall(DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap));
  /* Convert overlap label to stratified migration SF */
  PetscCall(DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap));
  PetscCall(DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified));
  PetscCall(PetscSFDestroy(&sfOverlap));
  sfOverlap = sfStratified;
  PetscCall(PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF"));
  PetscCall(PetscSFSetFromOptions(sfOverlap));

  PetscCall(PetscSectionDestroy(&rootSection));
  PetscCall(PetscSectionDestroy(&leafSection));
  PetscCall(ISDestroy(&rootrank));
  PetscCall(ISDestroy(&leafrank));
  PetscCall(PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0));

  /* Build the overlapping DM */
  PetscCall(DMPlexCreate(comm, dmOverlap));
  PetscCall(PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh"));
  PetscCall(DMPlexMigrate(dm, sfOverlap, *dmOverlap));
  /* Store the overlap in the new DM */
  PetscCall(DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap));
  /* Build the new point SF */
  PetscCall(DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*dmOverlap, sfPoint));
  PetscCall(DMGetCoordinateDM(*dmOverlap, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  /* Cleanup overlap partition */
  PetscCall(DMLabelDestroy(&lblOverlap));
  /* Caller takes ownership of sfOverlap when sf was requested */
  if (sf) *sf = sfOverlap;
  else PetscCall(PetscSFDestroy(&sfOverlap));
  PetscCall(PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0));
  PetscFunctionReturn(0);
}

/* Implementation behind DMPlexGetOverlap(): read the stored overlap width */
PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
{
  DM_Plex *mesh = (DM_Plex*) dm->data;

  PetscFunctionBegin;
  *overlap = mesh->overlap;
  PetscFunctionReturn(0);
}

/* Implementation behind DMPlexSetOverlap(): store the overlap width,
   accumulating the overlap of the source DM when one is given */
PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
{
  DM_Plex *mesh=NULL;
  DM_Plex *meshSrc=NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(dm, DM_CLASSID, 1, DMPLEX);
  mesh = (DM_Plex*) dm->data;
  mesh->overlap = overlap;
  if (dmSrc) {
    PetscValidHeaderSpecificType(dmSrc, DM_CLASSID, 2, DMPLEX);
    meshSrc = (DM_Plex*) dmSrc->data;
    /* The overlap from dmSrc is added to dm */
    mesh->overlap += meshSrc->overlap;
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetOverlap - Get the width of the cell overlap

  Not collective

  Input Parameter:
. dm - The DM

  Output Parameter:
.
overlap - the width of the cell overlap 2047 2048 Level: intermediate 2049 2050 .seealso: `DMPlexSetOverlap()`, `DMPlexDistribute()` 2051 @*/ 2052 PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap) 2053 { 2054 PetscFunctionBegin; 2055 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 2056 PetscValidIntPointer(overlap, 2); 2057 PetscUseMethod(dm, "DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap)); 2058 PetscFunctionReturn(0); 2059 } 2060 2061 /*@ 2062 DMPlexSetOverlap - Set the width of the cell overlap 2063 2064 Logically collective 2065 2066 Input Parameters: 2067 + dm - The DM 2068 . dmSrc - The DM that produced this one, or NULL 2069 - overlap - the width of the cell overlap 2070 2071 Note: 2072 The overlap from dmSrc is added to dm 2073 2074 Level: intermediate 2075 2076 .seealso: `DMPlexGetOverlap()`, `DMPlexDistribute()` 2077 @*/ 2078 PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap) 2079 { 2080 PetscFunctionBegin; 2081 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 2082 PetscValidLogicalCollectiveInt(dm, overlap, 3); 2083 PetscTryMethod(dm, "DMPlexSetOverlap_C",(DM,DM,PetscInt),(dm,dmSrc,overlap)); 2084 PetscFunctionReturn(0); 2085 } 2086 2087 PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist) 2088 { 2089 DM_Plex *mesh = (DM_Plex *) dm->data; 2090 2091 PetscFunctionBegin; 2092 mesh->distDefault = dist; 2093 PetscFunctionReturn(0); 2094 } 2095 2096 /*@ 2097 DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default 2098 2099 Logically collective 2100 2101 Input Parameters: 2102 + dm - The DM 2103 - dist - Flag for distribution 2104 2105 Level: intermediate 2106 2107 .seealso: `DMPlexDistributeGetDefault()`, `DMPlexDistribute()` 2108 @*/ 2109 PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist) 2110 { 2111 PetscFunctionBegin; 2112 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 2113 PetscValidLogicalCollectiveBool(dm, dist, 2); 2114 
PetscTryMethod(dm,"DMPlexDistributeSetDefault_C",(DM,PetscBool),(dm,dist)); 2115 PetscFunctionReturn(0); 2116 } 2117 2118 PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist) 2119 { 2120 DM_Plex *mesh = (DM_Plex *) dm->data; 2121 2122 PetscFunctionBegin; 2123 *dist = mesh->distDefault; 2124 PetscFunctionReturn(0); 2125 } 2126 2127 /*@ 2128 DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default 2129 2130 Not collective 2131 2132 Input Parameter: 2133 . dm - The DM 2134 2135 Output Parameter: 2136 . dist - Flag for distribution 2137 2138 Level: intermediate 2139 2140 .seealso: `DMPlexDistributeSetDefault()`, `DMPlexDistribute()` 2141 @*/ 2142 PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist) 2143 { 2144 PetscFunctionBegin; 2145 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 2146 PetscValidBoolPointer(dist, 2); 2147 PetscUseMethod(dm,"DMPlexDistributeGetDefault_C",(DM,PetscBool*),(dm,dist)); 2148 PetscFunctionReturn(0); 2149 } 2150 2151 /*@C 2152 DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the 2153 root process of the original's communicator. 2154 2155 Collective on dm 2156 2157 Input Parameter: 2158 . 
dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- gatherMesh - the gathered DM object, or NULL

  Level: intermediate

.seealso: `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
@*/
PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
{
  MPI_Comm         comm;
  PetscMPIInt      size;
  PetscPartitioner oldPart, gatherPart;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(gatherMesh,3);
  *gatherMesh = NULL;
  if (sf) *sf = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm,&size));
  /* Serial mesh: nothing to gather; gatherMesh stays NULL by contract */
  if (size == 1) PetscFunctionReturn(0);
  /* Temporarily swap in a gather partitioner, redistribute, then restore the
     original partitioner (extra reference keeps it alive across the swap) */
  PetscCall(DMPlexGetPartitioner(dm,&oldPart));
  PetscCall(PetscObjectReference((PetscObject)oldPart));
  PetscCall(PetscPartitionerCreate(comm,&gatherPart));
  PetscCall(PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER));
  PetscCall(DMPlexSetPartitioner(dm,gatherPart));
  PetscCall(DMPlexDistribute(dm,0,sf,gatherMesh));

  PetscCall(DMPlexSetPartitioner(dm,oldPart));
  PetscCall(PetscPartitionerDestroy(&gatherPart));
  PetscCall(PetscPartitionerDestroy(&oldPart));
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

  Collective on dm

  Input Parameter:
. dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- redundantMesh - the redundant DM object, or NULL

  Level: intermediate

.seealso: `DMPlexDistribute()`, `DMPlexGetGatherDM()`
@*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
{
  MPI_Comm     comm;
  PetscMPIInt  size, rank;
  PetscInt     pStart, pEnd, p;
  PetscInt     numPoints = -1;
  PetscSF      migrationSF, sfPoint, gatherSF;
  DM           gatherDM, dmCoord;
  PetscSFNode *points;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(redundantMesh,3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  PetscCallMPI(MPI_Comm_size(comm,&size));
  /* Serial mesh is already redundant: hand back a reference to dm itself */
  if (size == 1) {
    PetscCall(PetscObjectReference((PetscObject) dm));
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(0);
  }
  /* First gather everything on rank 0, then broadcast that mesh to all ranks */
  PetscCall(DMPlexGetGatherDM(dm,&gatherSF,&gatherDM));
  if (!gatherDM) PetscFunctionReturn(0);
  PetscCallMPI(MPI_Comm_rank(comm,&rank));
  PetscCall(DMPlexGetChart(gatherDM,&pStart,&pEnd));
  numPoints = pEnd - pStart;
  /* Only rank 0 holds the gathered mesh; broadcast its point count to everyone */
  PetscCallMPI(MPI_Bcast(&numPoints,1,MPIU_INT,0,comm));
  PetscCall(PetscMalloc1(numPoints,&points));
  PetscCall(PetscSFCreate(comm,&migrationSF));
  /* Every rank's leaves all point at rank 0's gathered points */
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank  = 0;
  }
  PetscCall(PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER));
  PetscCall(DMPlexCreate(comm, redundantMesh));
  PetscCall(PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh"));
  PetscCall(DMPlexMigrate(gatherDM, migrationSF, *redundantMesh));
  /* This is to express that all point are in overlap */
  PetscCall(DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT));
  PetscCall(DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint));
  PetscCall(DMSetPointSF(*redundantMesh, sfPoint));
  PetscCall(DMGetCoordinateDM(*redundantMesh, &dmCoord));
  if (dmCoord) PetscCall(DMSetPointSF(dmCoord, sfPoint));
  PetscCall(PetscSFDestroy(&sfPoint));
  if (sf) {
    PetscSF tsf;

    /* Full distribution SF = gather followed by broadcast, restratified */
    PetscCall(PetscSFCompose(gatherSF,migrationSF,&tsf));
    PetscCall(DMPlexStratifyMigrationSF(dm, tsf, sf));
    PetscCall(PetscSFDestroy(&tsf));
  }
  PetscCall(PetscSFDestroy(&migrationSF));
  PetscCall(PetscSFDestroy(&gatherSF));
  PetscCall(DMDestroy(&gatherDM));
  PetscCall(DMCopyDisc(dm, *redundantMesh));
  PetscCall(DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh));
  PetscFunctionReturn(0);
}

/*@
  DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points.

  Collective

  Input Parameter:
. dm - The DM object

  Output Parameter:
. distributed - Flag whether the DM is distributed

  Level: intermediate

  Notes:
  This currently finds out whether at least two ranks have any DAG points.
  This involves MPI_Allreduce() with one integer.
  The result is currently not stashed so every call to this routine involves this global communication.

.seealso: `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
@*/
PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
{
  PetscInt    pStart, pEnd, count;
  MPI_Comm    comm;
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidBoolPointer(distributed,2);
  PetscCall(PetscObjectGetComm((PetscObject)dm,&comm));
  PetscCallMPI(MPI_Comm_size(comm,&size));
  if (size == 1) { *distributed = PETSC_FALSE; PetscFunctionReturn(0); }
  PetscCall(DMPlexGetChart(dm, &pStart, &pEnd));
  /* Count how many ranks have a non-empty chart; distributed iff more than one */
  count = (pEnd - pStart) > 0 ? 1 : 0;
  PetscCallMPI(MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm));
  *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
  PetscFunctionReturn(0);
}