1 #include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 2 #include <petsc/private/dmlabelimpl.h> /*I "petscdmlabel.h" I*/ 3 4 /*@C 5 DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback 6 7 Input Parameters: 8 + dm - The DM object 9 . user - The user callback, may be NULL (to clear the callback) 10 - ctx - context for callback evaluation, may be NULL 11 12 Level: advanced 13 14 Notes: 15 The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency. 16 17 Any setting here overrides other configuration of DMPlex adjacency determination. 18 19 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser() 20 @*/ 21 PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx) 22 { 23 DM_Plex *mesh = (DM_Plex *)dm->data; 24 25 PetscFunctionBegin; 26 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 27 mesh->useradjacency = user; 28 mesh->useradjacencyctx = ctx; 29 PetscFunctionReturn(0); 30 } 31 32 /*@C 33 DMPlexGetAdjacencyUser - get the user-defined adjacency callback 34 35 Input Parameter: 36 . dm - The DM object 37 38 Output Parameters: 39 - user - The user callback 40 - ctx - context for callback evaluation 41 42 Level: advanced 43 44 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser() 45 @*/ 46 PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx) 47 { 48 DM_Plex *mesh = (DM_Plex *)dm->data; 49 50 PetscFunctionBegin; 51 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 52 if (user) *user = mesh->useradjacency; 53 if (ctx) *ctx = mesh->useradjacencyctx; 54 PetscFunctionReturn(0); 55 } 56 57 /*@ 58 DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints. 
59 60 Input Parameters: 61 + dm - The DM object 62 - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 63 64 Level: intermediate 65 66 .seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 67 @*/ 68 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors) 69 { 70 DM_Plex *mesh = (DM_Plex *) dm->data; 71 72 PetscFunctionBegin; 73 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 74 mesh->useAnchors = useAnchors; 75 PetscFunctionReturn(0); 76 } 77 78 /*@ 79 DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints. 80 81 Input Parameter: 82 . dm - The DM object 83 84 Output Parameter: 85 . useAnchors - Flag to use the closure. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 86 87 Level: intermediate 88 89 .seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 90 @*/ 91 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors) 92 { 93 DM_Plex *mesh = (DM_Plex *) dm->data; 94 95 PetscFunctionBegin; 96 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 97 PetscValidIntPointer(useAnchors, 2); 98 *useAnchors = mesh->useAnchors; 99 PetscFunctionReturn(0); 100 } 101 102 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 103 { 104 const PetscInt *cone = NULL; 105 PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c; 106 PetscErrorCode ierr; 107 108 PetscFunctionBeginHot; 109 ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr); 110 ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr); 111 for (c = 0; c <= coneSize; ++c) { 112 const PetscInt point = !c ? 
p : cone[c-1]; 113 const PetscInt *support = NULL; 114 PetscInt supportSize, s, q; 115 116 ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr); 117 ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr); 118 for (s = 0; s < supportSize; ++s) { 119 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) { 120 if (support[s] == adj[q]) break; 121 } 122 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 123 } 124 } 125 *adjSize = numAdj; 126 PetscFunctionReturn(0); 127 } 128 129 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 130 { 131 const PetscInt *support = NULL; 132 PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s; 133 PetscErrorCode ierr; 134 135 PetscFunctionBeginHot; 136 ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr); 137 ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr); 138 for (s = 0; s <= supportSize; ++s) { 139 const PetscInt point = !s ? 
p : support[s-1]; 140 const PetscInt *cone = NULL; 141 PetscInt coneSize, c, q; 142 143 ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr); 144 ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr); 145 for (c = 0; c < coneSize; ++c) { 146 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) { 147 if (cone[c] == adj[q]) break; 148 } 149 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 150 } 151 } 152 *adjSize = numAdj; 153 PetscFunctionReturn(0); 154 } 155 156 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[]) 157 { 158 PetscInt *star = NULL; 159 PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s; 160 PetscErrorCode ierr; 161 162 PetscFunctionBeginHot; 163 ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 164 for (s = 0; s < starSize*2; s += 2) { 165 const PetscInt *closure = NULL; 166 PetscInt closureSize, c, q; 167 168 ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 169 for (c = 0; c < closureSize*2; c += 2) { 170 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) { 171 if (closure[c] == adj[q]) break; 172 } 173 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 174 } 175 ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 176 } 177 ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 178 *adjSize = numAdj; 179 PetscFunctionReturn(0); 180 } 181 182 PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[]) 183 { 184 static PetscInt asiz = 0; 
185 PetscInt maxAnchors = 1; 186 PetscInt aStart = -1, aEnd = -1; 187 PetscInt maxAdjSize; 188 PetscSection aSec = NULL; 189 IS aIS = NULL; 190 const PetscInt *anchors; 191 DM_Plex *mesh = (DM_Plex *)dm->data; 192 PetscErrorCode ierr; 193 194 PetscFunctionBeginHot; 195 if (useAnchors) { 196 ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr); 197 if (aSec) { 198 ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr); 199 maxAnchors = PetscMax(1,maxAnchors); 200 ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr); 201 ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr); 202 } 203 } 204 if (!*adj) { 205 PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd; 206 207 ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr); 208 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 209 ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr); 210 coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1; 211 supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1; 212 asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries); 213 asiz *= maxAnchors; 214 asiz = PetscMin(asiz,pEnd-pStart); 215 ierr = PetscMalloc1(asiz,adj);CHKERRQ(ierr); 216 } 217 if (*adjSize < 0) *adjSize = asiz; 218 maxAdjSize = *adjSize; 219 if (mesh->useradjacency) { 220 ierr = mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);CHKERRQ(ierr); 221 } else if (useTransitiveClosure) { 222 ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr); 223 } else if (useCone) { 224 ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 225 } else { 226 ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 227 } 228 if (useAnchors && aSec) { 229 PetscInt origSize = *adjSize; 230 PetscInt numAdj = origSize; 231 PetscInt i = 0, j; 232 PetscInt *orig = *adj; 233 234 while (i < origSize) { 235 PetscInt p = orig[i]; 236 PetscInt 
aDof = 0; 237 238 if (p >= aStart && p < aEnd) { 239 ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr); 240 } 241 if (aDof) { 242 PetscInt aOff; 243 PetscInt s, q; 244 245 for (j = i + 1; j < numAdj; j++) { 246 orig[j - 1] = orig[j]; 247 } 248 origSize--; 249 numAdj--; 250 ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr); 251 for (s = 0; s < aDof; ++s) { 252 for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) { 253 if (anchors[aOff+s] == orig[q]) break; 254 } 255 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 256 } 257 } 258 else { 259 i++; 260 } 261 } 262 *adjSize = numAdj; 263 ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr); 264 } 265 PetscFunctionReturn(0); 266 } 267 268 /*@ 269 DMPlexGetAdjacency - Return all points adjacent to the given point 270 271 Input Parameters: 272 + dm - The DM object 273 . p - The point 274 . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE 275 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize 276 277 Output Parameters: 278 + adjSize - The number of adjacent points 279 - adj - The adjacent points 280 281 Level: advanced 282 283 Notes: 284 The user must PetscFree the adj array if it was not passed in. 
285 286 .seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator() 287 @*/ 288 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[]) 289 { 290 PetscBool useCone, useClosure, useAnchors; 291 PetscErrorCode ierr; 292 293 PetscFunctionBeginHot; 294 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 295 PetscValidPointer(adjSize,3); 296 PetscValidPointer(adj,4); 297 ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr); 298 ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); 299 ierr = DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);CHKERRQ(ierr); 300 PetscFunctionReturn(0); 301 } 302 303 /*@ 304 DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity 305 306 Collective on DM 307 308 Input Parameters: 309 + dm - The DM 310 - sfPoint - The PetscSF which encodes point connectivity 311 312 Output Parameters: 313 + processRanks - A list of process neighbors, or NULL 314 - sfProcess - An SF encoding the two-sided process connectivity, or NULL 315 316 Level: developer 317 318 .seealso: PetscSFCreate(), DMPlexCreateProcessSF() 319 @*/ 320 PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess) 321 { 322 const PetscSFNode *remotePoints; 323 PetscInt *localPointsNew; 324 PetscSFNode *remotePointsNew; 325 const PetscInt *nranks; 326 PetscInt *ranksNew; 327 PetscBT neighbors; 328 PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n; 329 PetscMPIInt size, proc, rank; 330 PetscErrorCode ierr; 331 332 PetscFunctionBegin; 333 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 334 PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2); 335 if (processRanks) {PetscValidPointer(processRanks, 3);} 336 if (sfProcess) {PetscValidPointer(sfProcess, 4);} 337 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) 
dm), &size);CHKERRQ(ierr); 338 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 339 ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr); 340 ierr = PetscBTCreate(size, &neighbors);CHKERRQ(ierr); 341 ierr = PetscBTMemzero(size, neighbors);CHKERRQ(ierr); 342 /* Compute root-to-leaf process connectivity */ 343 ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr); 344 ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr); 345 for (p = pStart; p < pEnd; ++p) { 346 PetscInt ndof, noff, n; 347 348 ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr); 349 ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr); 350 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 351 } 352 ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr); 353 /* Compute leaf-to-neighbor process connectivity */ 354 ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr); 355 ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr); 356 for (p = pStart; p < pEnd; ++p) { 357 PetscInt ndof, noff, n; 358 359 ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr); 360 ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr); 361 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 362 } 363 ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr); 364 /* Compute leaf-to-root process connectivity */ 365 for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);} 366 /* Calculate edges */ 367 PetscBTClear(neighbors, rank); 368 for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;} 369 ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr); 370 ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr); 371 ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr); 372 for(proc = 0, n = 0; 
proc < size; ++proc) { 373 if (PetscBTLookup(neighbors, proc)) { 374 ranksNew[n] = proc; 375 localPointsNew[n] = proc; 376 remotePointsNew[n].index = rank; 377 remotePointsNew[n].rank = proc; 378 ++n; 379 } 380 } 381 ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr); 382 if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);} 383 else {ierr = PetscFree(ranksNew);CHKERRQ(ierr);} 384 if (sfProcess) { 385 ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr); 386 ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr); 387 ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr); 388 ierr = PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr); 389 } 390 PetscFunctionReturn(0); 391 } 392 393 /*@ 394 DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF. 395 396 Collective on DM 397 398 Input Parameter: 399 . dm - The DM 400 401 Output Parameters: 402 + rootSection - The number of leaves for a given root point 403 . rootrank - The rank of each edge into the root point 404 . 
leafSection - The number of processes sharing a given leaf point 405 - leafrank - The rank of each process sharing a leaf point 406 407 Level: developer 408 409 .seealso: DMPlexCreateOverlap() 410 @*/ 411 PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank) 412 { 413 MPI_Comm comm; 414 PetscSF sfPoint; 415 const PetscInt *rootdegree; 416 PetscInt *myrank, *remoterank; 417 PetscInt pStart, pEnd, p, nedges; 418 PetscMPIInt rank; 419 PetscErrorCode ierr; 420 421 PetscFunctionBegin; 422 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 423 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 424 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 425 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 426 /* Compute number of leaves for each root */ 427 ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr); 428 ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr); 429 ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr); 430 ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr); 431 for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);} 432 ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr); 433 /* Gather rank of each leaf to root */ 434 ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr); 435 ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr); 436 ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr); 437 for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank; 438 ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 439 ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 440 ierr = PetscFree(myrank);CHKERRQ(ierr); 441 ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr); 442 /* Distribute remote ranks to leaves */ 443 ierr = 
PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr); 444 ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr); 445 PetscFunctionReturn(0); 446 } 447 448 /*@C 449 DMPlexCreateOverlap - Compute owner information for shared points. This basically gets two-sided for an SF. 450 451 Collective on DM 452 453 Input Parameters: 454 + dm - The DM 455 . levels - Number of overlap levels 456 . rootSection - The number of leaves for a given root point 457 . rootrank - The rank of each edge into the root point 458 . leafSection - The number of processes sharing a given leaf point 459 - leafrank - The rank of each process sharing a leaf point 460 461 Output Parameters: 462 + ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 463 464 Level: developer 465 466 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute() 467 @*/ 468 PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 469 { 470 MPI_Comm comm; 471 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 472 PetscSF sfPoint; 473 const PetscSFNode *remote; 474 const PetscInt *local; 475 const PetscInt *nrank, *rrank; 476 PetscInt *adj = NULL; 477 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l; 478 PetscMPIInt rank, size; 479 PetscBool flg; 480 PetscErrorCode ierr; 481 482 PetscFunctionBegin; 483 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 484 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 485 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 486 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 487 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 488 ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr); 489 ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr); 490 ierr = 
DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr); 491 /* Handle leaves: shared with the root point */ 492 for (l = 0; l < nleaves; ++l) { 493 PetscInt adjSize = PETSC_DETERMINE, a; 494 495 ierr = DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);CHKERRQ(ierr); 496 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);} 497 } 498 ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr); 499 ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr); 500 /* Handle roots */ 501 for (p = pStart; p < pEnd; ++p) { 502 PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a; 503 504 if ((p >= sStart) && (p < sEnd)) { 505 /* Some leaves share a root with other leaves on different processes */ 506 ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr); 507 if (neighbors) { 508 ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr); 509 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 510 for (n = 0; n < neighbors; ++n) { 511 const PetscInt remoteRank = nrank[noff+n]; 512 513 if (remoteRank == rank) continue; 514 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 515 } 516 } 517 } 518 /* Roots are shared with leaves */ 519 ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr); 520 if (!neighbors) continue; 521 ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr); 522 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 523 for (n = 0; n < neighbors; ++n) { 524 const PetscInt remoteRank = rrank[noff+n]; 525 526 if (remoteRank == rank) continue; 527 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 528 } 529 } 530 ierr = PetscFree(adj);CHKERRQ(ierr); 531 ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr); 532 ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr); 533 /* Add additional overlap levels */ 534 
for (l = 1; l < levels; l++) { 535 /* Propagate point donations over SF to capture remote connections */ 536 ierr = DMPlexPartitionLabelPropagate(dm, ovAdjByRank);CHKERRQ(ierr); 537 /* Add next level of point donations to the label */ 538 ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr); 539 } 540 /* We require the closure in the overlap */ 541 ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr); 542 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr); 543 if (flg) { 544 PetscViewer viewer; 545 ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);CHKERRQ(ierr); 546 ierr = DMLabelView(ovAdjByRank, viewer);CHKERRQ(ierr); 547 } 548 /* Invert sender to receiver label */ 549 ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);CHKERRQ(ierr); 550 ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);CHKERRQ(ierr); 551 /* Add owned points, except for shared local points */ 552 for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);} 553 for (l = 0; l < nleaves; ++l) { 554 ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr); 555 ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr); 556 } 557 /* Clean up */ 558 ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr); 559 PetscFunctionReturn(0); 560 } 561 562 /*@C 563 DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF 564 565 Collective on DM 566 567 Input Parameters: 568 + dm - The DM 569 - overlapSF - The SF mapping ghost points in overlap to owner points on other processes 570 571 Output Parameters: 572 + migrationSF - An SF that maps original points in old locations to points in new locations 573 574 Level: developer 575 576 .seealso: DMPlexCreateOverlap(), DMPlexDistribute() 577 @*/ 578 PetscErrorCode 
DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF) 579 { 580 MPI_Comm comm; 581 PetscMPIInt rank, size; 582 PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints; 583 PetscInt *pointDepths, *remoteDepths, *ilocal; 584 PetscInt *depthRecv, *depthShift, *depthIdx; 585 PetscSFNode *iremote; 586 PetscSF pointSF; 587 const PetscInt *sharedLocal; 588 const PetscSFNode *overlapRemote, *sharedRemote; 589 PetscErrorCode ierr; 590 591 PetscFunctionBegin; 592 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 593 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 594 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 595 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 596 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 597 598 /* Before building the migration SF we need to know the new stratum offsets */ 599 ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr); 600 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 601 for (d=0; d<dim+1; d++) { 602 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 603 for (p=pStart; p<pEnd; p++) pointDepths[p] = d; 604 } 605 for (p=0; p<nleaves; p++) remoteDepths[p] = -1; 606 ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 607 ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 608 609 /* Count recevied points in each stratum and compute the internal strata shift */ 610 ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr); 611 for (d=0; d<dim+1; d++) depthRecv[d]=0; 612 for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++; 613 depthShift[dim] = 0; 614 for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim]; 615 for (d=1; d<dim; d++) depthShift[d] += depthRecv[0]; 616 for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1]; 617 for (d=0; d<dim+1; d++) { 618 ierr = DMPlexGetDepthStratum(dm, 
d, &pStart, &pEnd);CHKERRQ(ierr); 619 depthIdx[d] = pStart + depthShift[d]; 620 } 621 622 /* Form the overlap SF build an SF that describes the full overlap migration SF */ 623 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 624 newLeaves = pEnd - pStart + nleaves; 625 ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr); 626 ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr); 627 /* First map local points to themselves */ 628 for (d=0; d<dim+1; d++) { 629 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 630 for (p=pStart; p<pEnd; p++) { 631 point = p + depthShift[d]; 632 ilocal[point] = point; 633 iremote[point].index = p; 634 iremote[point].rank = rank; 635 depthIdx[d]++; 636 } 637 } 638 639 /* Add in the remote roots for currently shared points */ 640 ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr); 641 ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr); 642 for (d=0; d<dim+1; d++) { 643 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 644 for (p=0; p<numSharedPoints; p++) { 645 if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) { 646 point = sharedLocal[p] + depthShift[d]; 647 iremote[point].index = sharedRemote[p].index; 648 iremote[point].rank = sharedRemote[p].rank; 649 } 650 } 651 } 652 653 /* Now add the incoming overlap points */ 654 for (p=0; p<nleaves; p++) { 655 point = depthIdx[remoteDepths[p]]; 656 ilocal[point] = point; 657 iremote[point].index = overlapRemote[p].index; 658 iremote[point].rank = overlapRemote[p].rank; 659 depthIdx[remoteDepths[p]]++; 660 } 661 ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr); 662 663 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 664 ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr); 665 ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr); 666 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 667 ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, 
newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr); 668 669 ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr); 670 PetscFunctionReturn(0); 671 } 672 673 /*@ 674 DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification. 675 676 Input Parameter: 677 + dm - The DM 678 - sf - A star forest with non-ordered leaves, usually defining a DM point migration 679 680 Output Parameter: 681 . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified 682 683 Level: developer 684 685 .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap() 686 @*/ 687 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF) 688 { 689 MPI_Comm comm; 690 PetscMPIInt rank, size; 691 PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves; 692 PetscInt *pointDepths, *remoteDepths, *ilocal; 693 PetscInt *depthRecv, *depthShift, *depthIdx; 694 PetscInt hybEnd[4]; 695 const PetscSFNode *iremote; 696 PetscErrorCode ierr; 697 698 PetscFunctionBegin; 699 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 700 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 701 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 702 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 703 ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr); 704 ierr = MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr); 705 if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth); 706 707 /* Before building the migration SF we need to know the new stratum offsets */ 708 ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr); 709 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 710 ierr = DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);CHKERRQ(ierr); 711 for (d = 0; d < 
depth+1; ++d) { 712 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 713 for (p = pStart; p < pEnd; ++p) { 714 if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */ 715 pointDepths[p] = 2 * d; 716 } else { 717 pointDepths[p] = 2 * d + 1; 718 } 719 } 720 } 721 for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1; 722 ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 723 ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 724 /* Count recevied points in each stratum and compute the internal strata shift */ 725 ierr = PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);CHKERRQ(ierr); 726 for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0; 727 for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++; 728 depthShift[2*depth+1] = 0; 729 for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1]; 730 for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth]; 731 depthShift[0] += depthRecv[1]; 732 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1]; 733 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0]; 734 for (d = 2 * depth-1; d > 2; --d) { 735 PetscInt e; 736 737 for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d]; 738 } 739 for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;} 740 /* Derive a new local permutation based on stratified indices */ 741 ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr); 742 for (p = 0; p < nleaves; ++p) { 743 const PetscInt dep = remoteDepths[p]; 744 745 ilocal[p] = depthShift[dep] + depthIdx[dep]; 746 depthIdx[dep]++; 747 } 748 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 749 ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr); 750 ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr); 751 ierr = 
PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);
  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalVec - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newVec - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscScalar   *originalValues, *newValues;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  /* Derive the distributed data layout and remote offsets from the point SF */
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  /* Size the output vector from the distributed section */
  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr);

  /* Broadcast the dof values along a section SF built from the point SF */
  ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalIS - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newIS - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
{
  PetscSF         fieldSF;
  PetscInt       *newValues, *remoteOffsets, fieldSize;
  const PetscInt *originalValues;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr);

  ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr =
PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr);
  /* The new IS takes ownership of the communicated values */
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
. datatype - The type of data
- originalData - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newData - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField()
@*/
PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscMPIInt    dataSize;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  /* Allocate the output buffer in raw bytes, sized by the MPI datatype extent */
  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr);
  ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr);

  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Distribute the cone section and cone/orientation data of dm onto dmParallel over migrationSF,
   renumbering cone points from the (optional) original global numbering into the new local numbering */
static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm       comm;
  PetscSF        coneSF;
  PetscSection   originalConeSection, newConeSection;
  PetscInt      *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);

  ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Distribute cone section */
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr);
  ierr = DMSetUp(dmParallel);CHKERRQ(ierr);
  {
    /* Track the maximum cone size on the distributed mesh */
    PetscInt pStart, pEnd, p;

    ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr);
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
  if (original) {
    PetscInt numCones;

    ierr = PetscSectionGetStorageSize(originalConeSection,&numCones);CHKERRQ(ierr);
ierr = PetscMalloc1(numCones,&globCones);CHKERRQ(ierr);
    /* Map cone points into the original global numbering before communication */
    ierr = ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);CHKERRQ(ierr);
  } else {
    globCones = cones;
  }
  ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  if (original) {
    ierr = PetscFree(globCones);CHKERRQ(ierr);
  }
  ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr);
  /* Translate global cone points into the new local numbering; unmapped points become negative (masked) */
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    /* Any negative cone point means the migration SF did not cover the closure of some cell */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);CHKERRQ(ierr);}
    }
    if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
#endif
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  /* Orientations need no renumbering, so reuse the same section SF */
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr);
  {
    /* Carry the adjacency configuration over to the distributed mesh */
    PetscBool useCone, useClosure, useAnchors;

    ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr);
    ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Distribute local coordinates and the coordinate section over migrationSF, and copy the periodicity description */
static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm         comm;
  PetscSection     originalCoordSection, newCoordSection;
  Vec              originalCoordinates, newCoordinates;
  PetscInt         bs;
  PetscBool        isper;
  const char      *name;
  const PetscReal *maxCell, *L;
  const DMBoundaryType *bd;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr);
  if (originalCoordinates) {
    /* Local coordinates live on PETSC_COMM_SELF */
    ierr = VecCreate(PETSC_COMM_SELF, &newCoordinates);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr);

    ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection,
originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr);
    ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr);
    ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr);
    ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr);
    ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr);
  }
  /* Carry the periodicity description over to the distributed mesh */
  ierr = DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);CHKERRQ(ierr);
  ierr = DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Here we are assuming that process 0 always has everything */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);

  /* If the user has changed the depth label, communicate it instead */
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetDepthLabel(dm, &depthLabel);CHKERRQ(ierr);
  if (depthLabel) {ierr = PetscObjectStateGet((PetscObject) depthLabel, &depthState);CHKERRQ(ierr);}
  /* A state mismatch on any rank means the user touched the depth label; agree globally */
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  ierr = MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);CHKERRQ(ierr);
  if (sendDepth) {
    /* Drop the automatically recreated depth label so the communicated one replaces it */
    ierr = DMRemoveLabel(dmParallel, "depth", &depthLabel);CHKERRQ(ierr);
    ierr = DMLabelDestroy(&depthLabel);CHKERRQ(ierr);
  }
  /* Everyone must have either the same number of labels, or none */
  ierr = DMGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr);
  numLabels = numLocalLabels;
  ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr);
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = numLabels-1; l >= 0; --l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      ierr = DMGetLabelByNum(dm, l, &label);CHKERRQ(ierr);
      /* Skip "depth" because it is recreated */
      ierr = PetscObjectGetName((PetscObject) label, &name);CHKERRQ(ierr);
      ierr = PetscStrcmp(name, "depth", &isDepth);CHKERRQ(ierr);
    }
    /* Rank 0 decides whether this label is "depth" (it is assumed to have everything) */
    ierr = MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr);
    if (isDepth && !sendDepth) continue;
    ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr);
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      ierr = MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
      if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        ierr = DMLabelHasStratum(labelNew, d, &has);CHKERRQ(ierr);
        if (!has) {ierr = DMLabelAddStratum(labelNew, d);CHKERRQ(ierr);}
      }
    }
    ierr = DMAddLabel(dmParallel, labelNew);CHKERRQ(ierr);
    /* Put the output flag in the new label */
    if (hasLabels) {ierr = DMGetLabelOutput(dm, name,
&lisOutput);CHKERRQ(ierr);}
    /* The label is only output if every rank wants it output */
    ierr = MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) labelNew, &name);CHKERRQ(ierr);
    ierr = DMSetLabelOutput(dmParallel, name, isOutput);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Flag the hybrid points of the serial mesh, migrate the flags over migrationSF,
   and reconstruct hybridPointMax for each stratum on the distributed mesh */
static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  PetscBool     *isHybrid, *isHybridParallel;
  PetscInt       dim, depth, d;
  PetscInt       pStart, pEnd, pStartP, pEndP;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm,&pStart,&pEnd);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dmParallel,&pStartP,&pEndP);CHKERRQ(ierr);
  ierr = PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= depth; d++) {
    /* NOTE(review): for depth == 1, stratum 1 appears to be indexed by dim in hybridPointMax — confirm against DMPlexGetHybridBounds() */
    PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

    if (hybridMax >= 0) {
      /* Points at or beyond hybridMax in this stratum are hybrid */
      PetscInt sStart, sEnd, p;

      ierr = DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);CHKERRQ(ierr);
      for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
    }
  }
  ierr = PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
  for (d = 0; d <= depth; d++) {
    PetscInt sStart, sEnd, p, dd;

    ierr = DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);CHKERRQ(ierr);
    dd   = (depth == 1 && d == 1) ? dim : d;
    /* The first flagged point in the stratum marks the start of the hybrid range */
    for (p = sStart; p < sEnd; p++) {
      if (isHybridParallel[p-pStartP]) {
        pmesh->hybridPointMax[dd] = p;
        break;
      }
    }
  }
  ierr = PetscFree2(isHybrid,isHybridParallel);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Distribute the parent/child (constraint tree) data of dm onto dmParallel over migrationSF,
   renumbering parents through the original and new numberings */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm        comm;
  DM              refTree;
  PetscSection    origParentSection, newParentSection;
  PetscInt       *origParents, *origChildIDs;
  PetscBool       flg;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);

  /* Set up tree */
  ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr);
  ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr);
  ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr);
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs,
*globParents; 1167 PetscInt *remoteOffsetsParents, newParentSize; 1168 PetscSF parentSF; 1169 1170 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 1171 ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr); 1172 ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr); 1173 ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr); 1174 ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr); 1175 ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr); 1176 ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr); 1177 ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr); 1178 if (original) { 1179 PetscInt numParents; 1180 1181 ierr = PetscSectionGetStorageSize(origParentSection,&numParents);CHKERRQ(ierr); 1182 ierr = PetscMalloc1(numParents,&globParents);CHKERRQ(ierr); 1183 ierr = ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);CHKERRQ(ierr); 1184 } 1185 else { 1186 globParents = origParents; 1187 } 1188 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr); 1189 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr); 1190 if (original) { 1191 ierr = PetscFree(globParents);CHKERRQ(ierr); 1192 } 1193 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1194 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1195 ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr); 1196 #if defined(PETSC_USE_DEBUG) 1197 { 1198 PetscInt p; 1199 PetscBool valid = PETSC_TRUE; 1200 for (p = 0; p < newParentSize; ++p) { 1201 if (newParents[p] < 0) {valid = PETSC_FALSE; ierr = 
PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);} 1202 } 1203 if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map"); 1204 } 1205 #endif 1206 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr); 1207 if (flg) { 1208 ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr); 1209 ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1210 ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr); 1211 ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1212 ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr); 1213 } 1214 ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr); 1215 ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr); 1216 ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr); 1217 ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr); 1218 } 1219 pmesh->useAnchors = mesh->useAnchors; 1220 PetscFunctionReturn(0); 1221 } 1222 1223 PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel) 1224 { 1225 PetscMPIInt rank, size; 1226 MPI_Comm comm; 1227 PetscErrorCode ierr; 1228 1229 PetscFunctionBegin; 1230 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1231 PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3); 1232 1233 /* Create point SF for parallel mesh */ 1234 ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1235 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1236 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1237 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 1238 { 1239 const PetscInt *leaves; 1240 PetscSFNode *remotePoints, *rowners, *lowners; 1241 PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints; 1242 PetscInt pStart, pEnd; 1243 1244 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 
1245 ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr); 1246 ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr); 1247 for (p=0; p<numRoots; p++) { 1248 rowners[p].rank = -1; 1249 rowners[p].index = -1; 1250 } 1251 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1252 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1253 for (p = 0; p < numLeaves; ++p) { 1254 if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */ 1255 lowners[p].rank = rank; 1256 lowners[p].index = leaves ? leaves[p] : p; 1257 } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */ 1258 lowners[p].rank = -2; 1259 lowners[p].index = -2; 1260 } 1261 } 1262 for (p=0; p<numRoots; p++) { /* Root must not participate in the rediction, flag so that MAXLOC does not use */ 1263 rowners[p].rank = -3; 1264 rowners[p].index = -3; 1265 } 1266 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1267 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1268 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1269 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1270 for (p = 0; p < numLeaves; ++p) { 1271 if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed"); 1272 if (lowners[p].rank != rank) ++numGhostPoints; 1273 } 1274 ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr); 1275 ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr); 1276 for (p = 0, gp = 0; p < numLeaves; ++p) { 1277 if (lowners[p].rank != rank) { 1278 ghostPoints[gp] = leaves ? 
leaves[p] : p; 1279 remotePoints[gp].rank = lowners[p].rank; 1280 remotePoints[gp].index = lowners[p].index; 1281 ++gp; 1282 } 1283 } 1284 ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr); 1285 ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr); 1286 ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr); 1287 } 1288 { 1289 PetscBool useCone, useClosure, useAnchors; 1290 1291 ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr); 1292 ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr); 1293 ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); 1294 ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr); 1295 } 1296 ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1297 PetscFunctionReturn(0); 1298 } 1299 1300 /*@ 1301 DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition? 1302 1303 Input Parameters: 1304 + dm - The DMPlex object 1305 - flg - Balance the partition? 1306 1307 Level: intermediate 1308 1309 .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance() 1310 @*/ 1311 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg) 1312 { 1313 DM_Plex *mesh = (DM_Plex *)dm->data; 1314 1315 PetscFunctionBegin; 1316 mesh->partitionBalance = flg; 1317 PetscFunctionReturn(0); 1318 } 1319 1320 /*@ 1321 DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition? 1322 1323 Input Parameter: 1324 + dm - The DMPlex object 1325 1326 Output Parameter: 1327 + flg - Balance the partition? 
1328 1329 Level: intermediate 1330 1331 .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance() 1332 @*/ 1333 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg) 1334 { 1335 DM_Plex *mesh = (DM_Plex *)dm->data; 1336 1337 PetscFunctionBegin; 1338 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1339 PetscValidIntPointer(flg, 2); 1340 *flg = mesh->partitionBalance; 1341 PetscFunctionReturn(0); 1342 } 1343 1344 /*@C 1345 DMPlexDerivePointSF - Build a point SF from an SF describing a point migration 1346 1347 Input Parameter: 1348 + dm - The source DMPlex object 1349 . migrationSF - The star forest that describes the parallel point remapping 1350 . ownership - Flag causing a vote to determine point ownership 1351 1352 Output Parameter: 1353 - pointSF - The star forest describing the point overlap in the remapped DM 1354 1355 Level: developer 1356 1357 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1358 @*/ 1359 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF) 1360 { 1361 PetscMPIInt rank, size; 1362 PetscInt p, nroots, nleaves, idx, npointLeaves; 1363 PetscInt *pointLocal; 1364 const PetscInt *leaves; 1365 const PetscSFNode *roots; 1366 PetscSFNode *rootNodes, *leafNodes, *pointRemote; 1367 Vec shifts; 1368 const PetscInt numShifts = 13759; 1369 const PetscScalar *shift = NULL; 1370 const PetscBool shiftDebug = PETSC_FALSE; 1371 PetscBool balance; 1372 PetscErrorCode ierr; 1373 1374 PetscFunctionBegin; 1375 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1376 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 1377 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr); 1378 1379 ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr); 1380 ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr); 1381 ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr); 1382 if (ownership) { 1383 /* If balancing, 
we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */ 1384 if (balance) { 1385 PetscRandom r; 1386 1387 ierr = PetscRandomCreate(PETSC_COMM_SELF, &r);CHKERRQ(ierr); 1388 ierr = PetscRandomSetInterval(r, 0, 2467*size);CHKERRQ(ierr); 1389 ierr = VecCreate(PETSC_COMM_SELF, &shifts);CHKERRQ(ierr); 1390 ierr = VecSetSizes(shifts, numShifts, numShifts);CHKERRQ(ierr); 1391 ierr = VecSetType(shifts, VECSTANDARD);CHKERRQ(ierr); 1392 ierr = VecSetRandom(shifts, r);CHKERRQ(ierr); 1393 ierr = PetscRandomDestroy(&r);CHKERRQ(ierr); 1394 ierr = VecGetArrayRead(shifts, &shift);CHKERRQ(ierr); 1395 } 1396 1397 /* Point ownership vote: Process with highest rank owns shared points */ 1398 for (p = 0; p < nleaves; ++p) { 1399 if (shiftDebug) { 1400 ierr = PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);CHKERRQ(ierr); 1401 } 1402 /* Either put in a bid or we know we own it */ 1403 leafNodes[p].rank = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size; 1404 leafNodes[p].index = p; 1405 } 1406 for (p = 0; p < nroots; p++) { 1407 /* Root must not participate in the reduction, flag so that MAXLOC does not use */ 1408 rootNodes[p].rank = -3; 1409 rootNodes[p].index = -3; 1410 } 1411 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1412 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1413 if (balance) { 1414 /* We've voted, now we need to get the rank. When we're balancing the partition, the "rank" in rootNotes is not 1415 * the rank but rather (rank + random)%size. 
So we do another reduction, voting the same way, but sending the 1416 * rank instead of the index. */ 1417 PetscSFNode *rootRanks = NULL; 1418 ierr = PetscMalloc1(nroots, &rootRanks);CHKERRQ(ierr); 1419 for (p = 0; p < nroots; p++) { 1420 rootRanks[p].rank = -3; 1421 rootRanks[p].index = -3; 1422 } 1423 for (p = 0; p < nleaves; p++) leafNodes[p].index = rank; 1424 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr); 1425 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr); 1426 for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index; 1427 ierr = PetscFree(rootRanks);CHKERRQ(ierr); 1428 } 1429 } else { 1430 for (p = 0; p < nroots; p++) { 1431 rootNodes[p].index = -1; 1432 rootNodes[p].rank = rank; 1433 }; 1434 for (p = 0; p < nleaves; p++) { 1435 /* Write new local id into old location */ 1436 if (roots[p].rank == rank) { 1437 rootNodes[roots[p].index].index = leaves ? leaves[p] : p; 1438 } 1439 } 1440 } 1441 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1442 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1443 1444 for (npointLeaves = 0, p = 0; p < nleaves; p++) { 1445 if (leafNodes[p].rank != rank) npointLeaves++; 1446 } 1447 ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr); 1448 ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr); 1449 for (idx = 0, p = 0; p < nleaves; p++) { 1450 if (leafNodes[p].rank != rank) { 1451 pointLocal[idx] = p; 1452 pointRemote[idx] = leafNodes[p]; 1453 idx++; 1454 } 1455 } 1456 if (shift) { 1457 ierr = VecRestoreArrayRead(shifts, &shift);CHKERRQ(ierr); 1458 ierr = VecDestroy(&shifts);CHKERRQ(ierr); 1459 } 1460 if (shiftDebug) {ierr = PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);CHKERRQ(ierr);} 1461 ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr); 1462 ierr = 
PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr); 1463 ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1464 ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr); 1465 PetscFunctionReturn(0); 1466 } 1467 1468 /*@C 1469 DMPlexMigrate - Migrates internal DM data over the supplied star forest 1470 1471 Collective on DM and PetscSF 1472 1473 Input Parameter: 1474 + dm - The source DMPlex object 1475 . sf - The star forest communication context describing the migration pattern 1476 1477 Output Parameter: 1478 - targetDM - The target DMPlex object 1479 1480 Level: intermediate 1481 1482 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1483 @*/ 1484 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) 1485 { 1486 MPI_Comm comm; 1487 PetscInt dim, cdim, nroots; 1488 PetscSF sfPoint; 1489 ISLocalToGlobalMapping ltogMigration; 1490 ISLocalToGlobalMapping ltogOriginal = NULL; 1491 PetscBool flg; 1492 PetscErrorCode ierr; 1493 1494 PetscFunctionBegin; 1495 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1496 ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1497 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 1498 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1499 ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr); 1500 ierr = DMGetCoordinateDim(dm, &cdim);CHKERRQ(ierr); 1501 ierr = DMSetCoordinateDim(targetDM, cdim);CHKERRQ(ierr); 1502 1503 /* Check for a one-to-all distribution pattern */ 1504 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 1505 ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr); 1506 if (nroots >= 0) { 1507 IS isOriginal; 1508 PetscInt n, size, nleaves; 1509 PetscInt *numbering_orig, *numbering_new; 1510 1511 /* Get the original point numbering */ 1512 ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr); 1513 ierr = ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);CHKERRQ(ierr); 1514 
ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr); 1515 ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1516 /* Convert to positive global numbers */ 1517 for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);} 1518 /* Derive the new local-to-global mapping from the old one */ 1519 ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1520 ierr = PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr); 1521 ierr = PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1522 ierr = PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1523 ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);CHKERRQ(ierr); 1524 ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1525 ierr = ISDestroy(&isOriginal);CHKERRQ(ierr); 1526 } else { 1527 /* One-to-all distribution pattern: We can derive LToG from SF */ 1528 ierr = ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);CHKERRQ(ierr); 1529 } 1530 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1531 if (flg) { 1532 ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr); 1533 ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr); 1534 } 1535 /* Migrate DM data to target DM */ 1536 ierr = DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1537 ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr); 1538 ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr); 1539 ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1540 ierr = DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 
1541 ierr = ISLocalToGlobalMappingDestroy(<ogOriginal);CHKERRQ(ierr); 1542 ierr = ISLocalToGlobalMappingDestroy(<ogMigration);CHKERRQ(ierr); 1543 ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1544 PetscFunctionReturn(0); 1545 } 1546 1547 PETSC_INTERN PetscErrorCode DMPlexPartitionLabelClosure_Private(DM,DMLabel,PetscInt,PetscInt,const PetscInt[],IS*); 1548 1549 /*@C 1550 DMPlexDistribute - Distributes the mesh and any associated sections. 1551 1552 Collective on DM 1553 1554 Input Parameter: 1555 + dm - The original DMPlex object 1556 - overlap - The overlap of partitions, 0 is the default 1557 1558 Output Parameter: 1559 + sf - The PetscSF used for point distribution, or NULL if not needed 1560 - dmParallel - The distributed DMPlex object 1561 1562 Note: If the mesh was not distributed, the output dmParallel will be NULL. 1563 1564 The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function 1565 representation on the mesh. 
1566 1567 Level: intermediate 1568 1569 .keywords: mesh, elements 1570 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency() 1571 @*/ 1572 PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) 1573 { 1574 MPI_Comm comm; 1575 PetscPartitioner partitioner; 1576 IS cellPart; 1577 PetscSection cellPartSection; 1578 DM dmCoord; 1579 DMLabel lblPartition, lblMigration; 1580 PetscSF sfMigration, sfStratified, sfPoint; 1581 PetscBool flg, balance; 1582 PetscMPIInt rank, size; 1583 PetscErrorCode ierr; 1584 1585 PetscFunctionBegin; 1586 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1587 PetscValidLogicalCollectiveInt(dm, overlap, 2); 1588 if (sf) PetscValidPointer(sf,3); 1589 PetscValidPointer(dmParallel,4); 1590 1591 if (sf) *sf = NULL; 1592 *dmParallel = NULL; 1593 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1594 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1595 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 1596 if (size == 1) PetscFunctionReturn(0); 1597 1598 ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1599 /* Create cell partition */ 1600 ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1601 ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr); 1602 ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr); 1603 ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr); 1604 { 1605 /* Convert partition to DMLabel */ 1606 IS is; 1607 PetscHSetI ht; 1608 PetscInt pStart, pEnd, proc, npoints, poff = 0, nranks, *iranks; 1609 const PetscInt *points; 1610 1611 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);CHKERRQ(ierr); 1612 /* Preallocate strata */ 1613 ierr = PetscHSetICreate(&ht);CHKERRQ(ierr); 1614 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1615 for (proc = pStart; proc < pEnd; proc++) { 1616 ierr = PetscSectionGetDof(cellPartSection, proc, 
&npoints);CHKERRQ(ierr); 1617 if (npoints) {ierr = PetscHSetIAdd(ht, proc);CHKERRQ(ierr);} 1618 } 1619 ierr = PetscHSetIGetSize(ht, &nranks);CHKERRQ(ierr); 1620 ierr = PetscMalloc1(nranks, &iranks);CHKERRQ(ierr); 1621 ierr = PetscHSetIGetElems(ht, &poff, iranks);CHKERRQ(ierr); 1622 ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr); 1623 ierr = DMLabelAddStrata(lblPartition, nranks, iranks);CHKERRQ(ierr); 1624 ierr = PetscFree(iranks);CHKERRQ(ierr); 1625 /* Inline DMPlexPartitionLabelClosure() */ 1626 ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr); 1627 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1628 for (proc = pStart; proc < pEnd; proc++) { 1629 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1630 if (!npoints) continue; 1631 ierr = PetscSectionGetOffset(cellPartSection, proc, &poff);CHKERRQ(ierr); 1632 ierr = DMPlexPartitionLabelClosure_Private(dm, lblPartition, proc, npoints, points+poff, &is);CHKERRQ(ierr); 1633 ierr = DMLabelSetStratumIS(lblPartition, proc, is);CHKERRQ(ierr); 1634 ierr = ISDestroy(&is);CHKERRQ(ierr); 1635 } 1636 ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr); 1637 } 1638 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);CHKERRQ(ierr); 1639 ierr = DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);CHKERRQ(ierr); 1640 ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr); 1641 /* Stratify the SF in case we are migrating an already parallel plex */ 1642 ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr); 1643 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1644 sfMigration = sfStratified; 1645 ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1646 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1647 if (flg) { 1648 ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1649 
ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1650 } 1651 1652 /* Create non-overlapping parallel DM and migrate internal data */ 1653 ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr); 1654 ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr); 1655 ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr); 1656 1657 /* Build the point SF without overlap */ 1658 ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr); 1659 ierr = DMPlexSetPartitionBalance(*dmParallel, balance);CHKERRQ(ierr); 1660 ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr); 1661 ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr); 1662 ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr); 1663 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1664 if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);} 1665 1666 if (overlap > 0) { 1667 DM dmOverlap; 1668 PetscInt nroots, nleaves; 1669 PetscSFNode *newRemote; 1670 const PetscSFNode *oldRemote; 1671 PetscSF sfOverlap, sfOverlapPoint; 1672 /* Add the partition overlap to the distributed DM */ 1673 ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr); 1674 ierr = DMDestroy(dmParallel);CHKERRQ(ierr); 1675 *dmParallel = dmOverlap; 1676 if (flg) { 1677 ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr); 1678 ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr); 1679 } 1680 1681 /* Re-map the migration SF to establish the full migration pattern */ 1682 ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr); 1683 ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1684 ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr); 1685 ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1686 ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, 
newRemote);CHKERRQ(ierr); 1687 ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr); 1688 ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1689 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1690 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1691 sfMigration = sfOverlapPoint; 1692 } 1693 /* Cleanup Partition */ 1694 ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr); 1695 ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr); 1696 ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr); 1697 ierr = ISDestroy(&cellPart);CHKERRQ(ierr); 1698 /* Copy BC */ 1699 ierr = DMCopyBoundary(dm, *dmParallel);CHKERRQ(ierr); 1700 /* Create sfNatural */ 1701 if (dm->useNatural) { 1702 PetscSection section; 1703 1704 ierr = DMGetSection(dm, §ion);CHKERRQ(ierr); 1705 ierr = DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);CHKERRQ(ierr); 1706 ierr = DMSetUseNatural(*dmParallel, PETSC_TRUE);CHKERRQ(ierr); 1707 } 1708 /* Cleanup */ 1709 if (sf) {*sf = sfMigration;} 1710 else {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);} 1711 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1712 ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1713 PetscFunctionReturn(0); 1714 } 1715 1716 /*@C 1717 DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM. 1718 1719 Collective on DM 1720 1721 Input Parameter: 1722 + dm - The non-overlapping distrbuted DMPlex object 1723 - overlap - The overlap of partitions 1724 1725 Output Parameter: 1726 + sf - The PetscSF used for point distribution 1727 - dmOverlap - The overlapping distributed DMPlex object, or NULL 1728 1729 Note: If the mesh was not distributed, the return value is NULL. 1730 1731 The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function 1732 representation on the mesh. 

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  DM             dmCoord;
  DMLabel        lblOverlap;
  PetscSF        sfOverlap, sfStratified, sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  /* Outputs are NULL unless an overlap mesh is actually produced */
  if (sf) *sf = NULL;
  *dmOverlap = NULL;
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  /* A serial mesh has no partition boundary, hence no overlap to add */
  if (size == 1) PetscFunctionReturn(0);

  ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  /* Compute point overlap with neighbouring processes on the distributed DM */
  ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr);
  /* Convert overlap label to stratified migration SF */
  ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr);
  ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
  sfOverlap = sfStratified;
  ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr);

  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);

  /* Build the overlapping DM */
  ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr);
  /* Build the new point SF */
  ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  /* Cleanup overlap partition: the overlap SF passes to the caller if requested */
  ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr);
  if (sf) *sf = sfOverlap;
  else {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);}
  ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
  root process of the original's communicator.

  Collective on DM

  Input Parameters:
.
dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- gatherMesh - the gathered DM object, or NULL

  Level: intermediate

.keywords: mesh
.seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
@*/
PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
{
  MPI_Comm         comm;
  PetscMPIInt      size;
  PetscPartitioner oldPart, gatherPart;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  /* Fixed: gatherMesh is argument 3, not 2 */
  PetscValidPointer(gatherMesh,3);
  *gatherMesh = NULL;
  if (sf) *sf = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* Nothing to gather on a single process */
  if (size == 1) PetscFunctionReturn(0);
  /* Temporarily install a GATHER partitioner, distribute, then restore the old one */
  ierr = DMPlexGetPartitioner(dm,&oldPart);CHKERRQ(ierr);
  ierr = PetscObjectReference((PetscObject)oldPart);CHKERRQ(ierr);
  ierr = PetscPartitionerCreate(comm,&gatherPart);CHKERRQ(ierr);
  ierr = PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);CHKERRQ(ierr);
  ierr = DMPlexSetPartitioner(dm,gatherPart);CHKERRQ(ierr);
  ierr = DMPlexDistribute(dm,0,sf,gatherMesh);CHKERRQ(ierr);

  ierr = DMPlexSetPartitioner(dm,oldPart);CHKERRQ(ierr);
  ierr = PetscPartitionerDestroy(&gatherPart);CHKERRQ(ierr);
  ierr = PetscPartitionerDestroy(&oldPart);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

  Collective on DM

  Input Parameters:
.
dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- redundantMesh - the redundant DM object, or NULL

  Level: intermediate

.keywords: mesh
.seealso: DMPlexDistribute(), DMPlexGetGatherDM()
@*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscInt       pStart, pEnd, p;
  PetscInt       numPoints = -1;
  PetscSF        migrationSF, sfPoint, gatherSF;
  DM             gatherDM, dmCoord;
  PetscSFNode   *points;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  /* Fixed: redundantMesh is argument 3, not 2 */
  PetscValidPointer(redundantMesh,3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    /* Already redundant in serial: return the same DM with a new reference */
    ierr = PetscObjectReference((PetscObject) dm);CHKERRQ(ierr);
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(0);
  }
  /* First gather everything onto rank 0, then broadcast to all ranks */
  ierr = DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);CHKERRQ(ierr);
  if (!gatherDM) PetscFunctionReturn(0);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(gatherDM,&pStart,&pEnd);CHKERRQ(ierr);
  numPoints = pEnd - pStart;
  /* Rank 0 holds the gathered mesh; its point count defines the leaf count everywhere */
  ierr = MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);CHKERRQ(ierr);
  ierr = PetscMalloc1(numPoints,&points);CHKERRQ(ierr);
  ierr = PetscSFCreate(comm,&migrationSF);CHKERRQ(ierr);
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank  = 0;
  }
  ierr = PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = DMPlexCreate(comm, redundantMesh);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);CHKERRQ(ierr);
  ierr = DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*redundantMesh, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*redundantMesh, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  if (sf) {
    PetscSF tsf;

    /* Compose gather + broadcast so the caller sees the full original->redundant map */
    ierr = PetscSFCompose(gatherSF,migrationSF,&tsf);CHKERRQ(ierr);
    ierr = DMPlexStratifyMigrationSF(dm, tsf, sf);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&tsf);CHKERRQ(ierr);
  }
  ierr = PetscSFDestroy(&migrationSF);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&gatherSF);CHKERRQ(ierr);
  ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}