1 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 2 #include <petsc/private/dmlabelimpl.h> /*I "petscdmlabel.h" I*/ 3 4 /*@C 5 DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback 6 7 Input Parameters: 8 + dm - The DM object 9 . user - The user callback, may be NULL (to clear the callback) 10 - ctx - context for callback evaluation, may be NULL 11 12 Level: advanced 13 14 Notes: 15 The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency. 16 17 Any setting here overrides other configuration of DMPlex adjacency determination. 18 19 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser() 20 @*/ 21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx) 22 { 23 DM_Plex *mesh = (DM_Plex *)dm->data; 24 25 PetscFunctionBegin; 26 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 27 mesh->useradjacency = user; 28 mesh->useradjacencyctx = ctx; 29 PetscFunctionReturn(0); 30 } 31 32 /*@C 33 DMPlexGetAdjacencyUser - get the user-defined adjacency callback 34 35 Input Parameter: 36 . dm - The DM object 37 38 Output Parameters: 39 - user - The user callback 40 - ctx - context for callback evaluation 41 42 Level: advanced 43 44 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser() 45 @*/ 46 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx) 47 { 48 DM_Plex *mesh = (DM_Plex *)dm->data; 49 50 PetscFunctionBegin; 51 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 52 if (user) *user = mesh->useradjacency; 53 if (ctx) *ctx = mesh->useradjacencyctx; 54 PetscFunctionReturn(0); 55 } 56 57 /*@ 58 DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints. 
59 60 Input Parameters: 61 + dm - The DM object 62 - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 63 64 Level: intermediate 65 66 .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 67 @*/ 68 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors) 69 { 70 DM_Plex *mesh = (DM_Plex *) dm->data; 71 72 PetscFunctionBegin; 73 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 74 mesh->useAnchors = useAnchors; 75 PetscFunctionReturn(0); 76 } 77 78 /*@ 79 DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints. 80 81 Input Parameter: 82 . dm - The DM object 83 84 Output Parameter: 85 . useAnchors - Flag to use the closure. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 86 87 Level: intermediate 88 89 .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 90 @*/ 91 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors) 92 { 93 DM_Plex *mesh = (DM_Plex *) dm->data; 94 95 PetscFunctionBegin; 96 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 97 PetscValidIntPointer(useAnchors, 2); 98 *useAnchors = mesh->useAnchors; 99 PetscFunctionReturn(0); 100 } 101 102 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 103 { 104 const PetscInt *cone = NULL; 105 PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c; 106 PetscErrorCode ierr; 107 108 PetscFunctionBeginHot; 109 ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr); 110 ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr); 111 for (c = 0; c <= coneSize; ++c) { 112 const PetscInt point = !c ? 
p : cone[c-1]; 113 const PetscInt *support = NULL; 114 PetscInt supportSize, s, q; 115 116 ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr); 117 ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr); 118 for (s = 0; s < supportSize; ++s) { 119 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) { 120 if (support[s] == adj[q]) break; 121 } 122 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 123 } 124 } 125 *adjSize = numAdj; 126 PetscFunctionReturn(0); 127 } 128 129 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 130 { 131 const PetscInt *support = NULL; 132 PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s; 133 PetscErrorCode ierr; 134 135 PetscFunctionBeginHot; 136 ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr); 137 ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr); 138 for (s = 0; s <= supportSize; ++s) { 139 const PetscInt point = !s ? 
p : support[s-1]; 140 const PetscInt *cone = NULL; 141 PetscInt coneSize, c, q; 142 143 ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr); 144 ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr); 145 for (c = 0; c < coneSize; ++c) { 146 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) { 147 if (cone[c] == adj[q]) break; 148 } 149 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 150 } 151 } 152 *adjSize = numAdj; 153 PetscFunctionReturn(0); 154 } 155 156 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[]) 157 { 158 PetscInt *star = NULL; 159 PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s; 160 PetscErrorCode ierr; 161 162 PetscFunctionBeginHot; 163 ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 164 for (s = 0; s < starSize*2; s += 2) { 165 const PetscInt *closure = NULL; 166 PetscInt closureSize, c, q; 167 168 ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 169 for (c = 0; c < closureSize*2; c += 2) { 170 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) { 171 if (closure[c] == adj[q]) break; 172 } 173 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 174 } 175 ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 176 } 177 ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 178 *adjSize = numAdj; 179 PetscFunctionReturn(0); 180 } 181 182 PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[]) 183 { 184 static PetscInt asiz = 0; 
185 PetscInt maxAnchors = 1; 186 PetscInt aStart = -1, aEnd = -1; 187 PetscInt maxAdjSize; 188 PetscSection aSec = NULL; 189 IS aIS = NULL; 190 const PetscInt *anchors; 191 DM_Plex *mesh = (DM_Plex *)dm->data; 192 PetscErrorCode ierr; 193 194 PetscFunctionBeginHot; 195 if (useAnchors) { 196 ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr); 197 if (aSec) { 198 ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr); 199 maxAnchors = PetscMax(1,maxAnchors); 200 ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr); 201 ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr); 202 } 203 } 204 if (!*adj) { 205 PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd; 206 207 ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr); 208 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 209 ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr); 210 coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1; 211 supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1; 212 asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries); 213 asiz *= maxAnchors; 214 asiz = PetscMin(asiz,pEnd-pStart); 215 ierr = PetscMalloc1(asiz,adj);CHKERRQ(ierr); 216 } 217 if (*adjSize < 0) *adjSize = asiz; 218 maxAdjSize = *adjSize; 219 if (mesh->useradjacency) { 220 ierr = (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);CHKERRQ(ierr); 221 } else if (useTransitiveClosure) { 222 ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr); 223 } else if (useCone) { 224 ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 225 } else { 226 ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 227 } 228 if (useAnchors && aSec) { 229 PetscInt origSize = *adjSize; 230 PetscInt numAdj = origSize; 231 PetscInt i = 0, j; 232 PetscInt *orig = *adj; 233 234 while (i < origSize) { 235 PetscInt p = orig[i]; 236 
PetscInt aDof = 0; 237 238 if (p >= aStart && p < aEnd) { 239 ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr); 240 } 241 if (aDof) { 242 PetscInt aOff; 243 PetscInt s, q; 244 245 for (j = i + 1; j < numAdj; j++) { 246 orig[j - 1] = orig[j]; 247 } 248 origSize--; 249 numAdj--; 250 ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr); 251 for (s = 0; s < aDof; ++s) { 252 for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) { 253 if (anchors[aOff+s] == orig[q]) break; 254 } 255 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 256 } 257 } 258 else { 259 i++; 260 } 261 } 262 *adjSize = numAdj; 263 ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr); 264 } 265 PetscFunctionReturn(0); 266 } 267 268 /*@ 269 DMPlexGetAdjacency - Return all points adjacent to the given point 270 271 Input Parameters: 272 + dm - The DM object 273 . p - The point 274 . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE 275 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize 276 277 Output Parameters: 278 + adjSize - The number of adjacent points 279 - adj - The adjacent points 280 281 Level: advanced 282 283 Notes: 284 The user must PetscFree the adj array if it was not passed in. 
285 286 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator() 287 @*/ 288 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[]) 289 { 290 PetscBool useCone, useClosure, useAnchors; 291 PetscErrorCode ierr; 292 293 PetscFunctionBeginHot; 294 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 295 PetscValidPointer(adjSize,3); 296 PetscValidPointer(adj,4); 297 ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr); 298 ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); 299 ierr = DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);CHKERRQ(ierr); 300 PetscFunctionReturn(0); 301 } 302 303 /*@ 304 DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity 305 306 Collective on dm 307 308 Input Parameters: 309 + dm - The DM 310 - sfPoint - The PetscSF which encodes point connectivity 311 312 Output Parameters: 313 + processRanks - A list of process neighbors, or NULL 314 - sfProcess - An SF encoding the two-sided process connectivity, or NULL 315 316 Level: developer 317 318 .seealso: PetscSFCreate(), DMPlexCreateProcessSF() 319 @*/ 320 PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess) 321 { 322 const PetscSFNode *remotePoints; 323 PetscInt *localPointsNew; 324 PetscSFNode *remotePointsNew; 325 const PetscInt *nranks; 326 PetscInt *ranksNew; 327 PetscBT neighbors; 328 PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n; 329 PetscMPIInt size, proc, rank; 330 PetscErrorCode ierr; 331 332 PetscFunctionBegin; 333 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 334 PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2); 335 if (processRanks) {PetscValidPointer(processRanks, 3);} 336 if (sfProcess) {PetscValidPointer(sfProcess, 4);} 337 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) 
dm), &size);CHKERRQ(ierr); 338 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 339 ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr); 340 ierr = PetscBTCreate(size, &neighbors);CHKERRQ(ierr); 341 ierr = PetscBTMemzero(size, neighbors);CHKERRQ(ierr); 342 /* Compute root-to-leaf process connectivity */ 343 ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr); 344 ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr); 345 for (p = pStart; p < pEnd; ++p) { 346 PetscInt ndof, noff, n; 347 348 ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr); 349 ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr); 350 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 351 } 352 ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr); 353 /* Compute leaf-to-neighbor process connectivity */ 354 ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr); 355 ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr); 356 for (p = pStart; p < pEnd; ++p) { 357 PetscInt ndof, noff, n; 358 359 ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr); 360 ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr); 361 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 362 } 363 ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr); 364 /* Compute leaf-to-root process connectivity */ 365 for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);} 366 /* Calculate edges */ 367 PetscBTClear(neighbors, rank); 368 for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;} 369 ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr); 370 ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr); 371 ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr); 372 for(proc = 0, n = 0; 
proc < size; ++proc) { 373 if (PetscBTLookup(neighbors, proc)) { 374 ranksNew[n] = proc; 375 localPointsNew[n] = proc; 376 remotePointsNew[n].index = rank; 377 remotePointsNew[n].rank = proc; 378 ++n; 379 } 380 } 381 ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr); 382 if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);} 383 else {ierr = PetscFree(ranksNew);CHKERRQ(ierr);} 384 if (sfProcess) { 385 ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr); 386 ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr); 387 ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr); 388 ierr = PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr); 389 } 390 PetscFunctionReturn(0); 391 } 392 393 /*@ 394 DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF. 395 396 Collective on dm 397 398 Input Parameter: 399 . dm - The DM 400 401 Output Parameters: 402 + rootSection - The number of leaves for a given root point 403 . rootrank - The rank of each edge into the root point 404 . 
leafSection - The number of processes sharing a given leaf point 405 - leafrank - The rank of each process sharing a leaf point 406 407 Level: developer 408 409 .seealso: DMPlexCreateOverlapLabel(), DMPlexDistribute(), DMPlexDistributeOverlap() 410 @*/ 411 PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank) 412 { 413 MPI_Comm comm; 414 PetscSF sfPoint; 415 const PetscInt *rootdegree; 416 PetscInt *myrank, *remoterank; 417 PetscInt pStart, pEnd, p, nedges; 418 PetscMPIInt rank; 419 PetscErrorCode ierr; 420 421 PetscFunctionBegin; 422 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 423 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 424 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 425 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 426 /* Compute number of leaves for each root */ 427 ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr); 428 ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr); 429 ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr); 430 ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr); 431 for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);} 432 ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr); 433 /* Gather rank of each leaf to root */ 434 ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr); 435 ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr); 436 ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr); 437 for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank; 438 ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 439 ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 440 ierr = PetscFree(myrank);CHKERRQ(ierr); 441 ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr); 442 /* Distribute 
remote ranks to leaves */ 443 ierr = PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr); 444 ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr); 445 PetscFunctionReturn(0); 446 } 447 448 /*@C 449 DMPlexCreateOverlapLabel - Compute owner information for shared points. This basically gets two-sided for an SF. 450 451 Collective on dm 452 453 Input Parameters: 454 + dm - The DM 455 . levels - Number of overlap levels 456 . rootSection - The number of leaves for a given root point 457 . rootrank - The rank of each edge into the root point 458 . leafSection - The number of processes sharing a given leaf point 459 - leafrank - The rank of each process sharing a leaf point 460 461 Output Parameter: 462 . ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 463 464 Level: developer 465 466 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute() 467 @*/ 468 PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 469 { 470 MPI_Comm comm; 471 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 472 PetscSF sfPoint; 473 const PetscSFNode *remote; 474 const PetscInt *local; 475 const PetscInt *nrank, *rrank; 476 PetscInt *adj = NULL; 477 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l; 478 PetscMPIInt rank, size; 479 PetscBool flg; 480 PetscErrorCode ierr; 481 482 PetscFunctionBegin; 483 *ovLabel = NULL; 484 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 485 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 486 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 487 if (size == 1) PetscFunctionReturn(0); 488 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 489 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 490 ierr = PetscSectionGetChart(leafSection, &sStart, 
&sEnd);CHKERRQ(ierr); 491 ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr); 492 ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr); 493 /* Handle leaves: shared with the root point */ 494 for (l = 0; l < nleaves; ++l) { 495 PetscInt adjSize = PETSC_DETERMINE, a; 496 497 ierr = DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);CHKERRQ(ierr); 498 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);} 499 } 500 ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr); 501 ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr); 502 /* Handle roots */ 503 for (p = pStart; p < pEnd; ++p) { 504 PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a; 505 506 if ((p >= sStart) && (p < sEnd)) { 507 /* Some leaves share a root with other leaves on different processes */ 508 ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr); 509 if (neighbors) { 510 ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr); 511 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 512 for (n = 0; n < neighbors; ++n) { 513 const PetscInt remoteRank = nrank[noff+n]; 514 515 if (remoteRank == rank) continue; 516 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 517 } 518 } 519 } 520 /* Roots are shared with leaves */ 521 ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr); 522 if (!neighbors) continue; 523 ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr); 524 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 525 for (n = 0; n < neighbors; ++n) { 526 const PetscInt remoteRank = rrank[noff+n]; 527 528 if (remoteRank == rank) continue; 529 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 530 } 531 } 532 ierr = PetscFree(adj);CHKERRQ(ierr); 533 ierr = ISRestoreIndices(rootrank, 
&rrank);CHKERRQ(ierr); 534 ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr); 535 /* Add additional overlap levels */ 536 for (l = 1; l < levels; l++) { 537 /* Propagate point donations over SF to capture remote connections */ 538 ierr = DMPlexPartitionLabelPropagate(dm, ovAdjByRank);CHKERRQ(ierr); 539 /* Add next level of point donations to the label */ 540 ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr); 541 } 542 /* We require the closure in the overlap */ 543 ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr); 544 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr); 545 if (flg) { 546 PetscViewer viewer; 547 ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);CHKERRQ(ierr); 548 ierr = DMLabelView(ovAdjByRank, viewer);CHKERRQ(ierr); 549 } 550 /* Invert sender to receiver label */ 551 ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);CHKERRQ(ierr); 552 ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);CHKERRQ(ierr); 553 /* Add owned points, except for shared local points */ 554 for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);} 555 for (l = 0; l < nleaves; ++l) { 556 ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr); 557 ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr); 558 } 559 /* Clean up */ 560 ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr); 561 PetscFunctionReturn(0); 562 } 563 564 /*@C 565 DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF 566 567 Collective on dm 568 569 Input Parameters: 570 + dm - The DM 571 - overlapSF - The SF mapping ghost points in overlap to owner points on other processes 572 573 Output Parameter: 574 . 
migrationSF - An SF that maps original points in old locations to points in new locations 575 576 Level: developer 577 578 .seealso: DMPlexCreateOverlapLabel(), DMPlexDistributeOverlap(), DMPlexDistribute() 579 @*/ 580 PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF) 581 { 582 MPI_Comm comm; 583 PetscMPIInt rank, size; 584 PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints; 585 PetscInt *pointDepths, *remoteDepths, *ilocal; 586 PetscInt *depthRecv, *depthShift, *depthIdx; 587 PetscSFNode *iremote; 588 PetscSF pointSF; 589 const PetscInt *sharedLocal; 590 const PetscSFNode *overlapRemote, *sharedRemote; 591 PetscErrorCode ierr; 592 593 PetscFunctionBegin; 594 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 595 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 596 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 597 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 598 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 599 600 /* Before building the migration SF we need to know the new stratum offsets */ 601 ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr); 602 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 603 for (d=0; d<dim+1; d++) { 604 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 605 for (p=pStart; p<pEnd; p++) pointDepths[p] = d; 606 } 607 for (p=0; p<nleaves; p++) remoteDepths[p] = -1; 608 ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 609 ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 610 611 /* Count recevied points in each stratum and compute the internal strata shift */ 612 ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr); 613 for (d=0; d<dim+1; d++) depthRecv[d]=0; 614 for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++; 615 depthShift[dim] = 0; 616 for 
(d=0; d<dim; d++) depthShift[d] = depthRecv[dim]; 617 for (d=1; d<dim; d++) depthShift[d] += depthRecv[0]; 618 for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1]; 619 for (d=0; d<dim+1; d++) { 620 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 621 depthIdx[d] = pStart + depthShift[d]; 622 } 623 624 /* Form the overlap SF build an SF that describes the full overlap migration SF */ 625 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 626 newLeaves = pEnd - pStart + nleaves; 627 ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr); 628 ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr); 629 /* First map local points to themselves */ 630 for (d=0; d<dim+1; d++) { 631 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 632 for (p=pStart; p<pEnd; p++) { 633 point = p + depthShift[d]; 634 ilocal[point] = point; 635 iremote[point].index = p; 636 iremote[point].rank = rank; 637 depthIdx[d]++; 638 } 639 } 640 641 /* Add in the remote roots for currently shared points */ 642 ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr); 643 ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr); 644 for (d=0; d<dim+1; d++) { 645 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 646 for (p=0; p<numSharedPoints; p++) { 647 if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) { 648 point = sharedLocal[p] + depthShift[d]; 649 iremote[point].index = sharedRemote[p].index; 650 iremote[point].rank = sharedRemote[p].rank; 651 } 652 } 653 } 654 655 /* Now add the incoming overlap points */ 656 for (p=0; p<nleaves; p++) { 657 point = depthIdx[remoteDepths[p]]; 658 ilocal[point] = point; 659 iremote[point].index = overlapRemote[p].index; 660 iremote[point].rank = overlapRemote[p].rank; 661 depthIdx[remoteDepths[p]]++; 662 } 663 ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr); 664 665 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 666 ierr = 
PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr); 667 ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr); 668 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 669 ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr); 670 671 ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr); 672 PetscFunctionReturn(0); 673 } 674 675 /*@ 676 DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification. 677 678 Input Parameters: 679 + dm - The DM 680 - sf - A star forest with non-ordered leaves, usually defining a DM point migration 681 682 Output Parameter: 683 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified 684 685 Level: developer 686 687 .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap() 688 @*/ 689 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF) 690 { 691 MPI_Comm comm; 692 PetscMPIInt rank, size; 693 PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves; 694 PetscInt *pointDepths, *remoteDepths, *ilocal; 695 PetscInt *depthRecv, *depthShift, *depthIdx; 696 PetscInt hybEnd[4]; 697 const PetscSFNode *iremote; 698 PetscErrorCode ierr; 699 700 PetscFunctionBegin; 701 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 702 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 703 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 704 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 705 ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr); 706 ierr = MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr); 707 if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth); 708 ierr = PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);CHKERRQ(ierr); 709 710 /* Before building the migration SF we need to 
know the new stratum offsets */ 711 ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr); 712 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 713 ierr = DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);CHKERRQ(ierr); 714 for (d = 0; d < depth+1; ++d) { 715 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 716 for (p = pStart; p < pEnd; ++p) { 717 if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */ 718 pointDepths[p] = 2 * d; 719 } else { 720 pointDepths[p] = 2 * d + 1; 721 } 722 } 723 } 724 for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1; 725 ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 726 ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 727 /* Count received points in each stratum and compute the internal strata shift */ 728 ierr = PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);CHKERRQ(ierr); 729 for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0; 730 for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++; 731 depthShift[2*depth+1] = 0; 732 for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1]; 733 for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth]; 734 depthShift[0] += depthRecv[1]; 735 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1]; 736 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0]; 737 for (d = 2 * depth-1; d > 2; --d) { 738 PetscInt e; 739 740 for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d]; 741 } 742 for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;} 743 /* Derive a new local permutation based on stratified indices */ 744 ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr); 745 for (p = 0; p < nleaves; ++p) { 746 const PetscInt dep = remoteDepths[p]; 747 748 ilocal[p] = depthShift[dep] + depthIdx[dep]; 749 depthIdx[dep]++; 
750 } 751 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 752 ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr); 753 ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr); 754 ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr); 755 ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr); 756 ierr = PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);CHKERRQ(ierr); 757 PetscFunctionReturn(0); 758 } 759 760 /*@ 761 DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 762 763 Collective on dm 764 765 Input Parameters: 766 + dm - The DMPlex object 767 . pointSF - The PetscSF describing the communication pattern 768 . originalSection - The PetscSection for existing data layout 769 - originalVec - The existing data in a local vector 770 771 Output Parameters: 772 + newSection - The PetscSF describing the new data layout 773 - newVec - The new data in a local vector 774 775 Level: developer 776 777 .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData() 778 @*/ 779 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec) 780 { 781 PetscSF fieldSF; 782 PetscInt *remoteOffsets, fieldSize; 783 PetscScalar *originalValues, *newValues; 784 PetscErrorCode ierr; 785 786 PetscFunctionBegin; 787 ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 788 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 789 790 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 791 ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr); 792 ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr); 793 794 ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr); 795 ierr = VecGetArray(newVec, 
&newValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on dm

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalIS - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newIS - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
{
  PetscSF         fieldSF;
  PetscInt       *newValues, *remoteOffsets, fieldSize;
  const PetscInt *originalValues;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr);

  ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr);
  /* The new IS takes ownership of newValues */
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on dm

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
.
datatype - The type of data
- originalData - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newData - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField()
@*/
PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscMPIInt    dataSize;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr);
  /* Caller owns *newData and must free it */
  ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr);

  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Distribute cones and cone orientations from dm to dmParallel over the migration SF,
   renumbering cone points into the new local numbering via 'renumbering' */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm       comm;
  PetscSF        coneSF;
  PetscSection   originalConeSection, newConeSection;
  PetscInt      *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Distribute cone section */
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr);
  ierr = DMSetUp(dmParallel);CHKERRQ(ierr);
  {
    /* Update the cached maximum cone size on the parallel mesh */
    PetscInt pStart, pEnd, p;

    ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr);
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
  if (original) {
    /* Map local cone points to global numbers before communication */
    PetscInt numCones;

    ierr = PetscSectionGetStorageSize(originalConeSection,&numCones);CHKERRQ(ierr);
    ierr = PetscMalloc1(numCones,&globCones);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);CHKERRQ(ierr);
  } else {
    globCones = cones;
  }
  ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  if (original) {
    ierr = PetscFree(globCones);CHKERRQ(ierr);
  }
  ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr);
  /* In-place global-to-local conversion; unmapped points become negative (caught below in debug mode) */
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);CHKERRQ(ierr);}
    }
    if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
#endif
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  /* Cone orientations are communicated as-is; they need no renumbering */
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr);
  {
    /* Carry the adjacency configuration over to the parallel mesh */
    PetscBool useCone, useClosure, useAnchors;

    ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr);
    ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseAnchors(dm,
&useAnchors);CHKERRQ(ierr); 986 ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr); 987 } 988 PetscFunctionReturn(0); 989 } 990 991 static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel) 992 { 993 MPI_Comm comm; 994 PetscSection originalCoordSection, newCoordSection; 995 Vec originalCoordinates, newCoordinates; 996 PetscInt bs; 997 PetscBool isper; 998 const char *name; 999 const PetscReal *maxCell, *L; 1000 const DMBoundaryType *bd; 1001 PetscErrorCode ierr; 1002 1003 PetscFunctionBegin; 1004 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1005 PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3); 1006 1007 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1008 ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr); 1009 ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr); 1010 ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr); 1011 if (originalCoordinates) { 1012 ierr = VecCreate(PETSC_COMM_SELF, &newCoordinates);CHKERRQ(ierr); 1013 ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr); 1014 ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr); 1015 1016 ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr); 1017 ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr); 1018 ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr); 1019 ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr); 1020 ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr); 1021 } 1022 ierr = DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);CHKERRQ(ierr); 1023 ierr = DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);CHKERRQ(ierr); 1024 PetscFunctionReturn(0); 1025 } 1026 1027 static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel) 1028 { 1029 DM_Plex *mesh = (DM_Plex*) dm->data; 
MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);

  /* If the user has changed the depth label, communicate it instead */
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetDepthLabel(dm, &depthLabel);CHKERRQ(ierr);
  if (depthLabel) {ierr = PetscObjectStateGet((PetscObject) depthLabel, &depthState);CHKERRQ(ierr);}
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  ierr = MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);CHKERRQ(ierr);
  if (sendDepth) {
    /* Remove the automatically created depth label on the parallel mesh so the distributed one replaces it */
    ierr = DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);CHKERRQ(ierr);
    ierr = DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);CHKERRQ(ierr);
  }
  /* Everyone must have either the same number of labels, or none */
  ierr = DMGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr);
  numLabels = numLocalLabels;
  ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr);
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = 0; l < numLabels; ++l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      ierr = DMGetLabelByNum(dm, l, &label);CHKERRQ(ierr);
      /* Skip "depth" because it is recreated */
      ierr = PetscObjectGetName((PetscObject) label, &name);CHKERRQ(ierr);
      ierr = PetscStrcmp(name, "depth", &isDepth);CHKERRQ(ierr);
    }
    ierr = MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr);
    if (isDepth && !sendDepth) continue;
    ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr);
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      ierr = MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
      if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        ierr = DMLabelHasStratum(labelNew, d, &has);CHKERRQ(ierr);
        if (!has) {ierr = DMLabelAddStratum(labelNew, d);CHKERRQ(ierr);}
      }
    }
    ierr = DMAddLabel(dmParallel, labelNew);CHKERRQ(ierr);
    /* Put the output flag in the new label */
    if (hasLabels) {ierr = DMGetLabelOutput(dm, name, &lisOutput);CHKERRQ(ierr);}
    ierr = MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) labelNew, &name);CHKERRQ(ierr);
    ierr = DMSetLabelOutput(dmParallel, name, isOutput);CHKERRQ(ierr);
    ierr = DMLabelDestroy(&labelNew);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Set hybrid and ghost state of points */
static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  PetscInt      *isHybrid, *isHybridParallel; /* 0 for normal, 1 for hybrid, 2 for ghost cell */
  PetscInt       dim, depth, d;
  PetscInt       pStart, pEnd, pStartP, pEndP, gcStart, gcEnd;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm,&pStart,&pEnd);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dmParallel,&pStartP,&pEndP);CHKERRQ(ierr);
  ierr = DMPlexGetGhostCellStratum(dm, &gcStart, &gcEnd);CHKERRQ(ierr);
  ierr = PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);CHKERRQ(ierr);
  /* Flag hybrid points (1) and ghost cells (2) in the original local numbering */
  for (d = 0; d <= depth; d++) {
    PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d], p;

    if (hybridMax >= 0) {
      PetscInt sStart, sEnd;

      ierr = DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);CHKERRQ(ierr);
      for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = 1;
    }
    if (d == depth) for (p = gcStart; p < gcEnd; ++p) isHybrid[p-pStart] = 2;
  }
  ierr = PetscSFBcastBegin(migrationSF,MPIU_INT,isHybrid,isHybridParallel);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF,MPIU_INT,isHybrid,isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
  /* The first flagged point in each stratum of the parallel mesh becomes the new hybrid/ghost bound */
  for (d = 0; d <= depth; d++) {
    PetscInt sStart, sEnd, p, dd;

    ierr = DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);CHKERRQ(ierr);
    dd = (depth == 1 && d == 1) ? dim : d;
    for (p = sStart; p < sEnd; p++) {
      if (isHybridParallel[p-pStartP] == 1) {
        pmesh->hybridPointMax[dd] = p;
        break;
      }
      if (d == depth && isHybridParallel[p-pStartP] == 2) {
        ierr = DMPlexSetGhostCellStratum(dmParallel, p, PETSC_DETERMINE);CHKERRQ(ierr);
        break;
      }
    }
  }
  ierr = PetscFree2(isHybrid,isHybridParallel);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Distribute the parent/child (reference tree) structure from dm to dmParallel */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm       comm;
  DM             refTree;
  PetscSection   origParentSection, newParentSection;
  PetscInt      *origParents, *origChildIDs;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);

  /* Set up tree */
  ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr);
  ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr);
  ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr);
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr);
    ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr);
    ierr = PetscSFCreateSectionSF(migrationSF, origParentSection,
remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr);
    ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr);
    ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr);
    ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr);
    if (original) {
      /* Map local parent points to global numbers before communication */
      PetscInt numParents;

      ierr = PetscSectionGetStorageSize(origParentSection,&numParents);CHKERRQ(ierr);
      ierr = PetscMalloc1(numParents,&globParents);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);CHKERRQ(ierr);
    }
    else {
      globParents = origParents;
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    if (original) {
      ierr = PetscFree(globParents);CHKERRQ(ierr);
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    /* In-place global-to-local conversion; unmapped points become negative (caught below in debug mode) */
    ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    {
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);}
      }
      if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
#endif
    ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr);
    if (flg) {
      ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
      ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
      ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr);
    }
    ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr);
    ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr);
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

/* Build the point SF (shared-point ownership) for the redistributed mesh */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt    rank, size;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr);
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid
or we know we own it */ 1265 lowners[p].rank = rank; 1266 lowners[p].index = leaves ? leaves[p] : p; 1267 } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */ 1268 lowners[p].rank = -2; 1269 lowners[p].index = -2; 1270 } 1271 } 1272 for (p=0; p<numRoots; p++) { /* Root must not participate in the rediction, flag so that MAXLOC does not use */ 1273 rowners[p].rank = -3; 1274 rowners[p].index = -3; 1275 } 1276 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1277 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1278 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1279 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1280 for (p = 0; p < numLeaves; ++p) { 1281 if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed"); 1282 if (lowners[p].rank != rank) ++numGhostPoints; 1283 } 1284 ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr); 1285 ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr); 1286 for (p = 0, gp = 0; p < numLeaves; ++p) { 1287 if (lowners[p].rank != rank) { 1288 ghostPoints[gp] = leaves ? 
leaves[p] : p; 1289 remotePoints[gp].rank = lowners[p].rank; 1290 remotePoints[gp].index = lowners[p].index; 1291 ++gp; 1292 } 1293 } 1294 ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr); 1295 ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr); 1296 ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr); 1297 } 1298 { 1299 PetscBool useCone, useClosure, useAnchors; 1300 1301 ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr); 1302 ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr); 1303 ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); 1304 ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr); 1305 } 1306 ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1307 PetscFunctionReturn(0); 1308 } 1309 1310 /*@ 1311 DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition? 1312 1313 Input Parameters: 1314 + dm - The DMPlex object 1315 - flg - Balance the partition? 1316 1317 Level: intermediate 1318 1319 .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance() 1320 @*/ 1321 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg) 1322 { 1323 DM_Plex *mesh = (DM_Plex *)dm->data; 1324 1325 PetscFunctionBegin; 1326 mesh->partitionBalance = flg; 1327 PetscFunctionReturn(0); 1328 } 1329 1330 /*@ 1331 DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition? 1332 1333 Input Parameter: 1334 . dm - The DMPlex object 1335 1336 Output Parameter: 1337 . flg - Balance the partition? 
1338 1339 Level: intermediate 1340 1341 .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance() 1342 @*/ 1343 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg) 1344 { 1345 DM_Plex *mesh = (DM_Plex *)dm->data; 1346 1347 PetscFunctionBegin; 1348 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1349 PetscValidBoolPointer(flg, 2); 1350 *flg = mesh->partitionBalance; 1351 PetscFunctionReturn(0); 1352 } 1353 1354 typedef struct { 1355 PetscInt vote, rank, index; 1356 } Petsc3Int; 1357 1358 /* MaxLoc, but carry a third piece of information around */ 1359 static void MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype) 1360 { 1361 Petsc3Int *a = (Petsc3Int *)inout_; 1362 Petsc3Int *b = (Petsc3Int *)in_; 1363 PetscInt i, len = *len_; 1364 for (i = 0; i < len; i++) { 1365 if (a[i].vote < b[i].vote) { 1366 a[i].vote = b[i].vote; 1367 a[i].rank = b[i].rank; 1368 a[i].index = b[i].index; 1369 } else if (a[i].vote <= b[i].vote) { 1370 if (a[i].rank >= b[i].rank) { 1371 a[i].rank = b[i].rank; 1372 a[i].index = b[i].index; 1373 } 1374 } 1375 } 1376 } 1377 1378 /*@C 1379 DMPlexCreatePointSF - Build a point SF from an SF describing a point migration 1380 1381 Input Parameters: 1382 + dm - The source DMPlex object 1383 . migrationSF - The star forest that describes the parallel point remapping 1384 . ownership - Flag causing a vote to determine point ownership 1385 1386 Output Parameter: 1387 - pointSF - The star forest describing the point overlap in the remapped DM 1388 1389 Notes: 1390 Output pointSF is guaranteed to have the array of local indices (leaves) sorted. 
Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank, size;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  Vec                shifts;
  const PetscInt     numShifts  = 13759;
  const PetscScalar *shift      = NULL;
  const PetscBool    shiftDebug = PETSC_FALSE;
  PetscBool          balance;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);CHKERRQ(ierr);

  ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr);
  if (ownership) {
    MPI_Op       op;
    MPI_Datatype datatype;
    Petsc3Int   *rootVote = NULL, *leafVote = NULL;
    /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
    if (balance) {
      PetscRandom r;

      ierr = PetscRandomCreate(PETSC_COMM_SELF, &r);CHKERRQ(ierr);
      ierr = PetscRandomSetInterval(r, 0, 2467*size);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF, &shifts);CHKERRQ(ierr);
      ierr = VecSetSizes(shifts, numShifts, numShifts);CHKERRQ(ierr);
      ierr = VecSetType(shifts, VECSTANDARD);CHKERRQ(ierr);
      ierr = VecSetRandom(shifts, r);CHKERRQ(ierr);
      ierr = PetscRandomDestroy(&r);CHKERRQ(ierr);
      ierr = VecGetArrayRead(shifts, &shift);CHKERRQ(ierr);
    }

    ierr = PetscMalloc1(nroots, &rootVote);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves, &leafVote);CHKERRQ(ierr);
    /* Point ownership vote: Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      if (shiftDebug) {
        ierr = PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);CHKERRQ(ierr);
      }
      /* Either put in a bid or we know we own it */
      leafVote[p].vote  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
      leafVote[p].rank  = rank;
      leafVote[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootVote[p].vote  = -3;
      rootVote[p].rank  = -3;
      rootVote[p].index = -3;
    }
    ierr = MPI_Type_contiguous(3, MPIU_INT, &datatype);CHKERRQ(ierr);
    ierr = MPI_Type_commit(&datatype);CHKERRQ(ierr);
    ierr = MPI_Op_create(&MaxLocCarry, 1, &op);CHKERRQ(ierr);
    ierr = PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);CHKERRQ(ierr);
    ierr = MPI_Op_free(&op);CHKERRQ(ierr);
    ierr = MPI_Type_free(&datatype);CHKERRQ(ierr);
    for (p = 0; p < nroots; p++) {
      rootNodes[p].rank  = rootVote[p].rank;
      rootNodes[p].index = rootVote[p].index;
    }
    ierr = PetscFree(leafVote);CHKERRQ(ierr);
    ierr = PetscFree(rootVote);CHKERRQ(ierr);
  } else {
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank  = rank;
    }
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);

  for (npointLeaves = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) npointLeaves++;
  }
  ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr);
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      /* Note that pointLocal is automatically sorted as it is sublist of 0, ..., nleaves-1 */
      pointLocal[idx]  = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  if (shift) {
    ierr = VecRestoreArrayRead(shifts, &shift);CHKERRQ(ierr);
    ierr = VecDestroy(&shifts);CHKERRQ(ierr);
  }
  if (shiftDebug) {ierr = PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);CHKERRQ(ierr);}
  ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  DMPlexMigrate - Migrates internal DM data over the supplied star forest

  Collective on dm

  Input Parameters:
+ dm - The source DMPlex object
.
sf - The star forest communication context describing the migration pattern 1518 1519 Output Parameter: 1520 - targetDM - The target DMPlex object 1521 1522 Level: intermediate 1523 1524 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1525 @*/ 1526 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) 1527 { 1528 MPI_Comm comm; 1529 PetscInt dim, cdim, nroots; 1530 PetscSF sfPoint; 1531 ISLocalToGlobalMapping ltogMigration; 1532 ISLocalToGlobalMapping ltogOriginal = NULL; 1533 PetscBool flg; 1534 PetscErrorCode ierr; 1535 1536 PetscFunctionBegin; 1537 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1538 ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1539 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 1540 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1541 ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr); 1542 ierr = DMGetCoordinateDim(dm, &cdim);CHKERRQ(ierr); 1543 ierr = DMSetCoordinateDim(targetDM, cdim);CHKERRQ(ierr); 1544 1545 /* Check for a one-to-all distribution pattern */ 1546 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 1547 ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr); 1548 if (nroots >= 0) { 1549 IS isOriginal; 1550 PetscInt n, size, nleaves; 1551 PetscInt *numbering_orig, *numbering_new; 1552 1553 /* Get the original point numbering */ 1554 ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr); 1555 ierr = ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);CHKERRQ(ierr); 1556 ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr); 1557 ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1558 /* Convert to positive global numbers */ 1559 for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);} 1560 /* Derive the new local-to-global mapping from the old one */ 1561 ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1562 
ierr = PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr); 1563 ierr = PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new);CHKERRQ(ierr); 1564 ierr = PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new);CHKERRQ(ierr); 1565 ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, <ogMigration);CHKERRQ(ierr); 1566 ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1567 ierr = ISDestroy(&isOriginal);CHKERRQ(ierr); 1568 } else { 1569 /* One-to-all distribution pattern: We can derive LToG from SF */ 1570 ierr = ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);CHKERRQ(ierr); 1571 } 1572 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1573 if (flg) { 1574 ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr); 1575 ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr); 1576 } 1577 /* Migrate DM data to target DM */ 1578 ierr = DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1579 ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr); 1580 ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr); 1581 ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1582 ierr = DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1583 ierr = ISLocalToGlobalMappingDestroy(<ogOriginal);CHKERRQ(ierr); 1584 ierr = ISLocalToGlobalMappingDestroy(<ogMigration);CHKERRQ(ierr); 1585 ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1586 PetscFunctionReturn(0); 1587 } 1588 1589 /*@C 1590 DMPlexDistribute - Distributes the mesh and any associated sections. 
1591 1592 Collective on dm 1593 1594 Input Parameters: 1595 + dm - The original DMPlex object 1596 - overlap - The overlap of partitions, 0 is the default 1597 1598 Output Parameters: 1599 + sf - The PetscSF used for point distribution, or NULL if not needed 1600 - dmParallel - The distributed DMPlex object 1601 1602 Note: If the mesh was not distributed, the output dmParallel will be NULL. 1603 1604 The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function 1605 representation on the mesh. 1606 1607 Level: intermediate 1608 1609 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexGetOverlap() 1610 @*/ 1611 PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) 1612 { 1613 MPI_Comm comm; 1614 PetscPartitioner partitioner; 1615 IS cellPart; 1616 PetscSection cellPartSection; 1617 DM dmCoord; 1618 DMLabel lblPartition, lblMigration; 1619 PetscSF sfMigration, sfStratified, sfPoint; 1620 PetscBool flg, balance; 1621 PetscMPIInt rank, size; 1622 PetscErrorCode ierr; 1623 1624 PetscFunctionBegin; 1625 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1626 PetscValidLogicalCollectiveInt(dm, overlap, 2); 1627 if (sf) PetscValidPointer(sf,3); 1628 PetscValidPointer(dmParallel,4); 1629 1630 if (sf) *sf = NULL; 1631 *dmParallel = NULL; 1632 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1633 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1634 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 1635 if (size == 1) PetscFunctionReturn(0); 1636 1637 ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1638 /* Create cell partition */ 1639 ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1640 ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr); 1641 ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr); 1642 ierr = PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, 
&cellPart);CHKERRQ(ierr); 1643 ierr = PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);CHKERRQ(ierr); 1644 { 1645 /* Convert partition to DMLabel */ 1646 IS is; 1647 PetscHSetI ht; 1648 const PetscInt *points; 1649 PetscInt *iranks; 1650 PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks; 1651 1652 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);CHKERRQ(ierr); 1653 /* Preallocate strata */ 1654 ierr = PetscHSetICreate(&ht);CHKERRQ(ierr); 1655 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1656 for (proc = pStart; proc < pEnd; proc++) { 1657 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1658 if (npoints) {ierr = PetscHSetIAdd(ht, proc);CHKERRQ(ierr);} 1659 } 1660 ierr = PetscHSetIGetSize(ht, &nranks);CHKERRQ(ierr); 1661 ierr = PetscMalloc1(nranks, &iranks);CHKERRQ(ierr); 1662 ierr = PetscHSetIGetElems(ht, &poff, iranks);CHKERRQ(ierr); 1663 ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr); 1664 ierr = DMLabelAddStrata(lblPartition, nranks, iranks);CHKERRQ(ierr); 1665 ierr = PetscFree(iranks);CHKERRQ(ierr); 1666 /* Inline DMPlexPartitionLabelClosure() */ 1667 ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr); 1668 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1669 for (proc = pStart; proc < pEnd; proc++) { 1670 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1671 if (!npoints) continue; 1672 ierr = PetscSectionGetOffset(cellPartSection, proc, &poff);CHKERRQ(ierr); 1673 ierr = DMPlexClosurePoints_Private(dm, npoints, points+poff, &is);CHKERRQ(ierr); 1674 ierr = DMLabelSetStratumIS(lblPartition, proc, is);CHKERRQ(ierr); 1675 ierr = ISDestroy(&is);CHKERRQ(ierr); 1676 } 1677 ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr); 1678 } 1679 ierr = PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);CHKERRQ(ierr); 1680 1681 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);CHKERRQ(ierr); 1682 ierr = 
DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);CHKERRQ(ierr); 1683 ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr); 1684 ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr); 1685 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1686 sfMigration = sfStratified; 1687 ierr = PetscSFSetUp(sfMigration);CHKERRQ(ierr); 1688 ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1689 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1690 if (flg) { 1691 ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr); 1692 ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr); 1693 } 1694 1695 /* Create non-overlapping parallel DM and migrate internal data */ 1696 ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr); 1697 ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr); 1698 ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr); 1699 1700 /* Build the point SF without overlap */ 1701 ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr); 1702 ierr = DMPlexSetPartitionBalance(*dmParallel, balance);CHKERRQ(ierr); 1703 ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr); 1704 ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr); 1705 ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr); 1706 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1707 if (flg) {ierr = PetscSFView(sfPoint, NULL);CHKERRQ(ierr);} 1708 1709 if (overlap > 0) { 1710 DM dmOverlap; 1711 PetscInt nroots, nleaves, noldleaves, l; 1712 const PetscInt *oldLeaves; 1713 PetscSFNode *newRemote, *permRemote; 1714 const PetscSFNode *oldRemote; 1715 PetscSF sfOverlap, sfOverlapPoint; 1716 1717 /* Add the partition overlap to the distributed DM */ 1718 ierr = DMPlexDistributeOverlap(*dmParallel, overlap, 
&sfOverlap, &dmOverlap);CHKERRQ(ierr); 1719 ierr = DMDestroy(dmParallel);CHKERRQ(ierr); 1720 *dmParallel = dmOverlap; 1721 if (flg) { 1722 ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr); 1723 ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr); 1724 } 1725 1726 /* Re-map the migration SF to establish the full migration pattern */ 1727 ierr = PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);CHKERRQ(ierr); 1728 ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1729 ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr); 1730 /* oldRemote: original root point mapping to original leaf point 1731 newRemote: original leaf point mapping to overlapped leaf point */ 1732 if (oldLeaves) { 1733 /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */ 1734 ierr = PetscMalloc1(noldleaves, &permRemote);CHKERRQ(ierr); 1735 for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l]; 1736 oldRemote = permRemote; 1737 } 1738 ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1739 ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1740 if (oldLeaves) {ierr = PetscFree(oldRemote);CHKERRQ(ierr);} 1741 ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr); 1742 ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1743 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1744 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1745 sfMigration = sfOverlapPoint; 1746 } 1747 /* Cleanup Partition */ 1748 ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr); 1749 ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr); 1750 ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr); 1751 ierr = ISDestroy(&cellPart);CHKERRQ(ierr); 1752 /* Copy BC */ 1753 ierr = DMCopyBoundary(dm, *dmParallel);CHKERRQ(ierr); 1754 /* 
Create sfNatural */ 1755 if (dm->useNatural) { 1756 PetscSection section; 1757 1758 ierr = DMGetLocalSection(dm, §ion);CHKERRQ(ierr); 1759 ierr = DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);CHKERRQ(ierr); 1760 ierr = DMSetUseNatural(*dmParallel, PETSC_TRUE);CHKERRQ(ierr); 1761 } 1762 /* Cleanup */ 1763 if (sf) {*sf = sfMigration;} 1764 else {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);} 1765 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1766 ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1767 PetscFunctionReturn(0); 1768 } 1769 1770 /*@C 1771 DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM. 1772 1773 Collective on dm 1774 1775 Input Parameters: 1776 + dm - The non-overlapping distributed DMPlex object 1777 - overlap - The overlap of partitions (the same on all ranks) 1778 1779 Output Parameters: 1780 + sf - The PetscSF used for point distribution 1781 - dmOverlap - The overlapping distributed DMPlex object, or NULL 1782 1783 Notes: 1784 If the mesh was not distributed, the return value is NULL. 1785 1786 The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function 1787 representation on the mesh. 
1788 1789 Level: advanced 1790 1791 .seealso: DMPlexCreate(), DMSetAdjacency(), DMPlexDistribute(), DMPlexCreateOverlapLabel(), DMPlexGetOverlap() 1792 @*/ 1793 PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap) 1794 { 1795 MPI_Comm comm; 1796 PetscMPIInt size, rank; 1797 PetscSection rootSection, leafSection; 1798 IS rootrank, leafrank; 1799 DM dmCoord; 1800 DMLabel lblOverlap; 1801 PetscSF sfOverlap, sfStratified, sfPoint; 1802 PetscErrorCode ierr; 1803 1804 PetscFunctionBegin; 1805 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1806 PetscValidLogicalCollectiveInt(dm, overlap, 2); 1807 if (sf) PetscValidPointer(sf, 3); 1808 PetscValidPointer(dmOverlap, 4); 1809 1810 if (sf) *sf = NULL; 1811 *dmOverlap = NULL; 1812 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1813 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 1814 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1815 if (size == 1) PetscFunctionReturn(0); 1816 1817 ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1818 /* Compute point overlap with neighbouring processes on the distributed DM */ 1819 ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1820 ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr); 1821 ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr); 1822 ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr); 1823 ierr = DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr); 1824 /* Convert overlap label to stratified migration SF */ 1825 ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr); 1826 ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr); 1827 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1828 sfOverlap = sfStratified; 1829 ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr); 1830 ierr = 
PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr); 1831 1832 ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr); 1833 ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr); 1834 ierr = ISDestroy(&rootrank);CHKERRQ(ierr); 1835 ierr = ISDestroy(&leafrank);CHKERRQ(ierr); 1836 ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1837 1838 /* Build the overlapping DM */ 1839 ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr); 1840 ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr); 1841 ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr); 1842 /* Store the overlap in the new DM */ 1843 ((DM_Plex*)(*dmOverlap)->data)->overlap = overlap + ((DM_Plex*)dm->data)->overlap; 1844 /* Build the new point SF */ 1845 ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1846 ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr); 1847 ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr); 1848 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1849 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1850 /* Cleanup overlap partition */ 1851 ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr); 1852 if (sf) *sf = sfOverlap; 1853 else {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);} 1854 ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1855 PetscFunctionReturn(0); 1856 } 1857 1858 PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap) 1859 { 1860 DM_Plex *mesh = (DM_Plex*) dm->data; 1861 1862 PetscFunctionBegin; 1863 *overlap = mesh->overlap; 1864 PetscFunctionReturn(0); 1865 } 1866 1867 /*@ 1868 DMPlexGetOverlap - Get the DMPlex partition overlap. 1869 1870 Not collective 1871 1872 Input Parameter: 1873 . dm - The DM 1874 1875 Output Parameter: 1876 . 
overlap - The overlap of this DM 1877 1878 Level: intermediate 1879 1880 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap(), DMPlexCreateOverlapLabel() 1881 @*/ 1882 PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap) 1883 { 1884 PetscErrorCode ierr; 1885 1886 PetscFunctionBegin; 1887 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1888 ierr = PetscUseMethod(dm,"DMPlexGetOverlap_C",(DM,PetscInt*),(dm,overlap));CHKERRQ(ierr); 1889 PetscFunctionReturn(0); 1890 } 1891 1892 1893 /*@C 1894 DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the 1895 root process of the original's communicator. 1896 1897 Collective on dm 1898 1899 Input Parameter: 1900 . dm - the original DMPlex object 1901 1902 Output Parameters: 1903 + sf - the PetscSF used for point distribution (optional) 1904 - gatherMesh - the gathered DM object, or NULL 1905 1906 Level: intermediate 1907 1908 .seealso: DMPlexDistribute(), DMPlexGetRedundantDM() 1909 @*/ 1910 PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh) 1911 { 1912 MPI_Comm comm; 1913 PetscMPIInt size; 1914 PetscPartitioner oldPart, gatherPart; 1915 PetscErrorCode ierr; 1916 1917 PetscFunctionBegin; 1918 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 1919 PetscValidPointer(gatherMesh,2); 1920 *gatherMesh = NULL; 1921 if (sf) *sf = NULL; 1922 comm = PetscObjectComm((PetscObject)dm); 1923 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1924 if (size == 1) PetscFunctionReturn(0); 1925 ierr = DMPlexGetPartitioner(dm,&oldPart);CHKERRQ(ierr); 1926 ierr = PetscObjectReference((PetscObject)oldPart);CHKERRQ(ierr); 1927 ierr = PetscPartitionerCreate(comm,&gatherPart);CHKERRQ(ierr); 1928 ierr = PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);CHKERRQ(ierr); 1929 ierr = DMPlexSetPartitioner(dm,gatherPart);CHKERRQ(ierr); 1930 ierr = DMPlexDistribute(dm,0,sf,gatherMesh);CHKERRQ(ierr); 1931 1932 ierr = DMPlexSetPartitioner(dm,oldPart);CHKERRQ(ierr); 1933 ierr = 
PetscPartitionerDestroy(&gatherPart);CHKERRQ(ierr); 1934 ierr = PetscPartitionerDestroy(&oldPart);CHKERRQ(ierr); 1935 PetscFunctionReturn(0); 1936 } 1937 1938 /*@C 1939 DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process. 1940 1941 Collective on dm 1942 1943 Input Parameter: 1944 . dm - the original DMPlex object 1945 1946 Output Parameters: 1947 + sf - the PetscSF used for point distribution (optional) 1948 - redundantMesh - the redundant DM object, or NULL 1949 1950 Level: intermediate 1951 1952 .seealso: DMPlexDistribute(), DMPlexGetGatherDM() 1953 @*/ 1954 PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh) 1955 { 1956 MPI_Comm comm; 1957 PetscMPIInt size, rank; 1958 PetscInt pStart, pEnd, p; 1959 PetscInt numPoints = -1; 1960 PetscSF migrationSF, sfPoint, gatherSF; 1961 DM gatherDM, dmCoord; 1962 PetscSFNode *points; 1963 PetscErrorCode ierr; 1964 1965 PetscFunctionBegin; 1966 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 1967 PetscValidPointer(redundantMesh,2); 1968 *redundantMesh = NULL; 1969 comm = PetscObjectComm((PetscObject)dm); 1970 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1971 if (size == 1) { 1972 ierr = PetscObjectReference((PetscObject) dm);CHKERRQ(ierr); 1973 *redundantMesh = dm; 1974 if (sf) *sf = NULL; 1975 PetscFunctionReturn(0); 1976 } 1977 ierr = DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);CHKERRQ(ierr); 1978 if (!gatherDM) PetscFunctionReturn(0); 1979 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1980 ierr = DMPlexGetChart(gatherDM,&pStart,&pEnd);CHKERRQ(ierr); 1981 numPoints = pEnd - pStart; 1982 ierr = MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);CHKERRQ(ierr); 1983 ierr = PetscMalloc1(numPoints,&points);CHKERRQ(ierr); 1984 ierr = PetscSFCreate(comm,&migrationSF);CHKERRQ(ierr); 1985 for (p = 0; p < numPoints; p++) { 1986 points[p].index = p; 1987 points[p].rank = 0; 1988 } 1989 ierr = 
PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);CHKERRQ(ierr); 1990 ierr = DMPlexCreate(comm, redundantMesh);CHKERRQ(ierr); 1991 ierr = PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");CHKERRQ(ierr); 1992 ierr = DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);CHKERRQ(ierr); 1993 ierr = DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1994 ierr = DMSetPointSF(*redundantMesh, sfPoint);CHKERRQ(ierr); 1995 ierr = DMGetCoordinateDM(*redundantMesh, &dmCoord);CHKERRQ(ierr); 1996 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1997 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1998 if (sf) { 1999 PetscSF tsf; 2000 2001 ierr = PetscSFCompose(gatherSF,migrationSF,&tsf);CHKERRQ(ierr); 2002 ierr = DMPlexStratifyMigrationSF(dm, tsf, sf);CHKERRQ(ierr); 2003 ierr = PetscSFDestroy(&tsf);CHKERRQ(ierr); 2004 } 2005 ierr = PetscSFDestroy(&migrationSF);CHKERRQ(ierr); 2006 ierr = PetscSFDestroy(&gatherSF);CHKERRQ(ierr); 2007 ierr = DMDestroy(&gatherDM);CHKERRQ(ierr); 2008 PetscFunctionReturn(0); 2009 } 2010 2011 /*@ 2012 DMPlexIsDistributed - Find out whether this DM is distributed, i.e. more than one rank owns some points. 2013 2014 Collective 2015 2016 Input Parameter: 2017 . dm - The DM object 2018 2019 Output Parameter: 2020 . distributed - Flag whether the DM is distributed 2021 2022 Level: intermediate 2023 2024 Notes: 2025 This currently finds out whether at least two ranks have any DAG points. 2026 This involves MPI_Allreduce() with one integer. 2027 The result is currently not stashed so every call to this routine involves this global communication. 
2028 2029 .seealso: DMPlexDistribute(), DMPlexGetOverlap(), DMPlexIsInterpolated() 2030 @*/ 2031 PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed) 2032 { 2033 PetscInt pStart, pEnd, count; 2034 MPI_Comm comm; 2035 PetscErrorCode ierr; 2036 2037 PetscFunctionBegin; 2038 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 2039 PetscValidPointer(distributed,2); 2040 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 2041 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 2042 count = !!(pEnd - pStart); 2043 ierr = MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);CHKERRQ(ierr); 2044 *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE; 2045 PetscFunctionReturn(0); 2046 } 2047