#include <petsc/private/dmpleximpl.h>  /*I      "petscdmplex.h"   I*/
#include <petsc/private/dmlabelimpl.h> /*I      "petscdmlabel.h"  I*/

/*@C
  DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  Input Parameters:
+ dm   - The DM object
. user - The user callback, may be NULL (to clear the callback)
- ctx  - context for callback evaluation, may be NULL

  Level: advanced

  Notes:
  The caller of DMPlexGetAdjacency() may need to arrange that a large enough array is available for the adjacency.

  Any setting here overrides other configuration of DMPlex adjacency determination.

.seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
@*/
PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void *ctx)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  mesh->useradjacency    = user;
  mesh->useradjacencyctx = ctx;
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetAdjacencyUser - Get the user-defined adjacency callback

  Input Parameter:
. dm - The DM object

  Output Parameters:
+ user - The user callback
- ctx  - context for callback evaluation

  Level: advanced

.seealso: DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
@*/
PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (user) *user = mesh->useradjacency;
  if (ctx)  *ctx  = mesh->useradjacencyctx;
  PetscFunctionReturn(0);
}
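
/* Example (illustrative sketch, not part of the library): a minimal user
   adjacency callback that declares each point adjacent to itself and its cone.
   On entry *adjSize holds the capacity of adj[]; on exit it holds the number
   of entries written. The name MyConeAdjacency is hypothetical, and we assume
   the caller arranged a large enough array, as the Notes above require.

     static PetscErrorCode MyConeAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
     {
       const PetscInt *cone;
       PetscInt        coneSize, c;
       PetscErrorCode  ierr;

       PetscFunctionBegin;
       ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
       ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
       if (coneSize+1 > *adjSize) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Adjacency array too small");
       adj[0] = p;
       for (c = 0; c < coneSize; ++c) adj[c+1] = cone[c];
       *adjSize = coneSize+1;
       PetscFunctionReturn(0);
     }

   It would be installed with DMPlexSetAdjacencyUser(dm, MyConeAdjacency, NULL)
   and cleared again with DMPlexSetAdjacencyUser(dm, NULL, NULL). */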

/*@
  DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

  Input Parameters:
+ dm - The DM object
- useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMGetAdjacency(), DMSetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  mesh->useAnchors = useAnchors;
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexSetAdjacencyUseAnchors(), DMSetAdjacency(), DMGetAdjacency(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useAnchors, 2);
  *useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt  point   = !c ? p : cone[c-1];
    const PetscInt *support = NULL;
    PetscInt        supportSize, s, q;

    ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr);
    ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr);
    for (s = 0; s < supportSize; ++s) {
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
        if (support[s] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *support = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr);
  ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr);
  for (s = 0; s <= supportSize; ++s) {
    const PetscInt  point = !s ? p : support[s-1];
    const PetscInt *cone  = NULL;
    PetscInt        coneSize, c, q;

    ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr);
    ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr);
    for (c = 0; c < coneSize; ++c) {
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
        if (cone[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
{
  PetscInt      *star = NULL;
  PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  for (s = 0; s < starSize*2; s += 2) {
    const PetscInt *closure = NULL;
    PetscInt        closureSize, c, q;

    ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
    for (c = 0; c < closureSize*2; c += 2) {
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
        if (closure[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
    ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
  }
  ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  static PetscInt asiz = 0;
  PetscInt        maxAnchors = 1;
  PetscInt        aStart = -1, aEnd = -1;
  PetscInt        maxAdjSize;
  PetscSection    aSec = NULL;
  IS              aIS = NULL;
  const PetscInt *anchors;
  DM_Plex        *mesh = (DM_Plex *)dm->data;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  if (useAnchors) {
    ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr);
    if (aSec) {
      ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr);
      maxAnchors = PetscMax(1,maxAnchors);
      ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr);
      ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr);
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr);
    ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
    ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr);
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz  = PetscMin(asiz,pEnd-pStart);
    ierr  = PetscMalloc1(asiz,adj);CHKERRQ(ierr);
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  if (mesh->useradjacency) {
    ierr = mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);CHKERRQ(ierr);
  } else if (useTransitiveClosure) {
    ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr);
  } else if (useCone) {
    ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  } else {
    ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  }
  if (useAnchors && aSec) {
    PetscInt  origSize = *adjSize;
    PetscInt  numAdj   = origSize;
    PetscInt  i = 0, j;
    PetscInt *orig = *adj;

    while (i < origSize) {
      PetscInt p    = orig[i];
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr);
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr);
        for (s = 0; s < aDof; ++s) {
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
        }
      } else {
        i++;
      }
    }
    *adjSize = numAdj;
    ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacency - Return all points adjacent to the given point

  Input Parameters:
+ dm - The DM object
. p - The point
. adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
- adj - Either NULL so that the array is allocated, or an existing array with size adjSize

  Output Parameters:
+ adjSize - The number of adjacent points
- adj - The adjacent points

  Level: advanced

  Notes:
  The user must PetscFree the adj array if it was not passed in.

.seealso: DMSetAdjacency(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
{
  PetscBool      useCone, useClosure, useAnchors;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidPointer(adjSize,3);
  PetscValidPointer(adj,4);
  ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
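
/* Example (illustrative sketch): letting DMPlexGetAdjacency() allocate the
   adjacency array on first use. Passing adj == NULL triggers allocation, and
   PETSC_DETERMINE requests the computed size; the array is reused across
   points and must be freed by the caller afterwards, per the Notes above.

     PetscInt *adj = NULL, adjSize, a, pStart, pEnd, p;

     ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
     for (p = pStart; p < pEnd; ++p) {
       adjSize = PETSC_DETERMINE;
       ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
       for (a = 0; a < adjSize; ++a) {
         ... use adj[a] ...
       }
     }
     ierr = PetscFree(adj);CHKERRQ(ierr); */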

/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on DM

  Input Parameters:
+ dm - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - The number of leaves for a given root point (see DMPlexDistributeOwnership())
. rootRanks - The rank of each edge into the root point
. leafRankSection - The number of processes sharing a given leaf point
- leafRanks - The rank of each process sharing a leaf point

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: PetscSFCreate(), DMPlexCreateProcessSF()
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors;
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) {PetscValidPointer(processRanks, 7);}
  if (sfProcess)    {PetscValidPointer(sfProcess, 8);}
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr);
  ierr = PetscBTCreate(size, &neighbors);CHKERRQ(ierr);
  ierr = PetscBTMemzero(size, neighbors);CHKERRQ(ierr);
  /* Compute root-to-leaf process connectivity */
  ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-neighbor process connectivity */
  ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr);
  for (proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr);
  if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);}
  else              {ierr = PetscFree(ranksNew);CHKERRQ(ierr);}
  if (sfProcess) {
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This essentially makes the point SF two-sided, so that roots also know the ranks of their leaves.

  Collective on DM

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Level: developer

.seealso: DMPlexCreateOverlap()
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  /* Compute number of leaves for each root */
  ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr);
  ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);}
  ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr);
  /* Gather rank of each leaf to root */
  ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr);
  ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr);
  ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr);
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscFree(myrank);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr);
  /* Distribute remote ranks to leaves */
  ierr = PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr);
  ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
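
/* Example (illustrative sketch): computing the two-sided ownership data. The
   sections must be created beforehand; the index sets are created by the call
   and, like the sections, are owned by the caller.

     PetscSection rootSection, leafSection;
     IS           rootrank, leafrank;

     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &rootSection);CHKERRQ(ierr);
     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &leafSection);CHKERRQ(ierr);
     ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
     ... use the sections and index sets, e.g. in DMPlexCreateOverlap() ...
     ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
     ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
     ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr); */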

/*@C
  DMPlexCreateOverlap - Compute the label marking the overlap points and the ranks that should receive them, for the requested number of overlap levels

  Collective on DM

  Input Parameters:
+ dm - The DM
. levels - Number of overlap levels
. rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Output Parameter:
. ovLabel - DMLabel containing remote overlap contributions as point/rank pairings

  Level: developer

.seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  MPI_Comm           comm;
  DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
  PetscSF            sfPoint;
  const PetscSFNode *remote;
  const PetscInt    *local;
  const PetscInt    *nrank, *rrank;
  PetscInt          *adj = NULL;
  PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
  PetscMPIInt        rank, size;
  PetscBool          flg;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr);
  ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr);
  /* Handle leaves: shared with the root point */
  for (l = 0; l < nleaves; ++l) {
    PetscInt adjSize = PETSC_DETERMINE, a;

    ierr = DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);CHKERRQ(ierr);
    for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);}
  }
  ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Handle roots */
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

    if ((p >= sStart) && (p < sEnd)) {
      /* Some leaves share a root with other leaves on different processes */
      ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr);
      if (neighbors) {
        ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr);
        ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
        for (n = 0; n < neighbors; ++n) {
          const PetscInt remoteRank = nrank[noff+n];

          if (remoteRank == rank) continue;
          for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
        }
      }
    }
    /* Roots are shared with leaves */
    ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr);
    if (!neighbors) continue;
    ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
    for (n = 0; n < neighbors; ++n) {
      const PetscInt remoteRank = rrank[noff+n];

      if (remoteRank == rank) continue;
      for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Add additional overlap levels */
  for (l = 1; l < levels; l++) {
    /* Propagate point donations over SF to capture remote connections */
    ierr = DMPlexPartitionLabelPropagate(dm, ovAdjByRank);CHKERRQ(ierr);
    /* Add next level of point donations to the label */
    ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr);
  }
  /* We require the closure in the overlap */
  ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr);
  if (flg) {
    PetscViewer viewer;
    ierr = PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);CHKERRQ(ierr);
    ierr = DMLabelView(ovAdjByRank, viewer);CHKERRQ(ierr);
  }
  /* Invert sender to receiver label */
  ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);CHKERRQ(ierr);
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);}
  for (l = 0; l < nleaves; ++l) {
    ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr);
    ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr);
  }
  /* Clean up */
  ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
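
/* Example (illustrative sketch): building a one-level overlap label from the
   ownership data produced by DMPlexDistributeOwnership() above.

     DMLabel ovLabel;

     ierr = DMPlexCreateOverlap(dm, 1, rootSection, rootrank, leafSection, leafrank, &ovLabel);CHKERRQ(ierr);
     ... ovLabel now pairs each overlap point with the ranks that should receive it ...
     ierr = DMLabelDestroy(&ovLabel);CHKERRQ(ierr); */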

/*@C
  DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF

  Collective on DM

  Input Parameters:
+ dm - The DM
- overlapSF - The SF mapping ghost points in overlap to owner points on other processes

  Output Parameter:
. migrationSF - An SF that maps original points in old locations to points in new locations

  Level: developer

.seealso: DMPlexCreateOverlap(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);

  /* Count received points in each stratum and compute the internal strata shift */
  ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Build an SF that describes the full overlap migration */
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  newLeaves = pEnd - pStart + nleaves;
  ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr);
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) {
      point                = p + depthShift[d];
      ilocal[point]        = point;
      iremote[point].index = p;
      iremote[point].rank  = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point                = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank  = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points */
  for (p=0; p<nleaves; p++) {
    point                = depthIdx[remoteDepths[p]];
    ilocal[point]        = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank  = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr);

  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

  Input Parameters:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Level: developer

.seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, size;
  PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscInt           hybEnd[4];
  const PetscSFNode *iremote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr);
  ierr = MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
  if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);
  ierr = PetscLogEventBegin(DMPLEX_PartStratSF,dm,0,0,0);CHKERRQ(ierr);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  ierr = DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[PetscMax(depth-1,0)],&hybEnd[1],&hybEnd[0]);CHKERRQ(ierr);
  for (d = 0; d < depth+1; ++d) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */
        pointDepths[p] = 2 * d;
      } else {
        pointDepths[p] = 2 * d + 1;
      }
    }
  }
  for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  /* Count received points in each stratum and compute the internal strata shift */
  ierr = PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);CHKERRQ(ierr);
  for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0;
  for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
  depthShift[2*depth+1] = 0;
  for (d = 0; d < 2*depth+1; ++d) depthShift[d]  = depthRecv[2 * depth + 1];
  for (d = 0; d < 2*depth; ++d)   depthShift[d] += depthRecv[2 * depth];
  depthShift[0] += depthRecv[1];
  for (d = 2; d < 2*depth; ++d)   depthShift[d] += depthRecv[1];
  for (d = 2; d < 2*depth; ++d)   depthShift[d] += depthRecv[0];
  for (d = 2 * depth-1; d > 2; --d) {
    PetscInt e;

    for (e = d - 1; e > 1; --e) depthShift[e] += depthRecv[d];
  }
  for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;}
  /* Derive a new local permutation based on stratified indices */
  ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr);
  for (p = 0; p < nleaves; ++p) {
    const PetscInt dep = remoteDepths[p];

    ilocal[p] = depthShift[dep] + depthIdx[dep];
    depthIdx[dep]++;
  }
  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr);
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);
  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_PartStratSF,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalVec - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newVec - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscScalar   *originalValues, *newValues;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr);

  ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
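
/* Example (illustrative sketch): migrating a Vec of per-point data through a
   migration SF. The names origSection and origVec stand for an existing layout
   and its data. The new section must be created beforehand; the new Vec must
   be created but not sized, since sizes and type are set inside the call.

     PetscSection newSection;
     Vec          newVec;

     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);CHKERRQ(ierr);
     ierr = VecCreate(PETSC_COMM_SELF, &newVec);CHKERRQ(ierr);
     ierr = DMPlexDistributeField(dm, migrationSF, origSection, origVec, newSection, newVec);CHKERRQ(ierr); */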

/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalIS - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newIS - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
{
  PetscSF         fieldSF;
  PetscInt       *newValues, *remoteOffsets, fieldSize;
  const PetscInt *originalValues;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr);

  ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
. datatype - The type of data
- originalData - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newData - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField()
@*/
PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscMPIInt    dataSize;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr);
  ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr);

  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
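
/* Example (illustrative sketch): migrating raw integer data attached to mesh
   points. The names origSection and origData are placeholders; newData is
   allocated inside the call and must be freed with PetscFree().

     PetscSection newSection;
     PetscInt    *newData;

     ierr = PetscSectionCreate(PetscObjectComm((PetscObject) dm), &newSection);CHKERRQ(ierr);
     ierr = DMPlexDistributeData(dm, migrationSF, origSection, MPIU_INT, origData, newSection, (void **) &newData);CHKERRQ(ierr);
     ... use the new layout and data ...
     ierr = PetscFree(newData);CHKERRQ(ierr); */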

static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm       comm;
  PetscSF        coneSF;
  PetscSection   originalConeSection, newConeSection;
  PetscInt      *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Distribute cone section */
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr);
  ierr = DMSetUp(dmParallel);CHKERRQ(ierr);
  {
    PetscInt pStart, pEnd, p;

    ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr);
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
  if (original) {
    PetscInt numCones;

    ierr = PetscSectionGetStorageSize(originalConeSection,&numCones);CHKERRQ(ierr);
    ierr = PetscMalloc1(numCones,&globCones);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);CHKERRQ(ierr);
  } else {
    globCones = cones;
  }
  ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  if (original) {
    ierr = PetscFree(globCones);CHKERRQ(ierr);
  }
  ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr);
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);CHKERRQ(ierr);}
    }
    if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
#endif
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr);
  {
    PetscBool useCone, useClosure, useAnchors;

    ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr);
    ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm              comm;
  PetscSection          originalCoordSection, newCoordSection;
  Vec                   originalCoordinates, newCoordinates;
  PetscInt              bs;
  PetscBool             isper;
  const char           *name;
  const PetscReal      *maxCell, *L;
  const DMBoundaryType *bd;
  PetscErrorCode        ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr);
  if (originalCoordinates) {
    ierr = VecCreate(PETSC_COMM_SELF, &newCoordinates);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr);

    ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr);
    ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr);
    ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr);
    ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr);
    ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr);
  }
  ierr = DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);CHKERRQ(ierr);
  ierr = DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Here we are assuming that process 0 always has everything */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);

  /* If the user has changed the depth label, communicate it instead */
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetDepthLabel(dm, &depthLabel);CHKERRQ(ierr);
  if (depthLabel) {ierr = PetscObjectStateGet((PetscObject) depthLabel, &depthState);CHKERRQ(ierr);}
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  ierr = MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);CHKERRQ(ierr);
  if (sendDepth) {
    ierr = DMRemoveLabel(dmParallel, "depth", &depthLabel);CHKERRQ(ierr);
    ierr = DMLabelDestroy(&depthLabel);CHKERRQ(ierr);
  }
  /* Everyone must have either the same number of labels, or none */
  ierr = DMGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr);
  numLabels = numLocalLabels;
  ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr);
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = numLabels-1; l >= 0; --l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      ierr = DMGetLabelByNum(dm, l, &label);CHKERRQ(ierr);
      /* Skip "depth" because it is recreated */
      ierr = PetscObjectGetName((PetscObject) label, &name);CHKERRQ(ierr);
      ierr = PetscStrcmp(name, "depth", &isDepth);CHKERRQ(ierr);
    }
    ierr = MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr);
    if (isDepth && !sendDepth) continue;
    ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr);
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      ierr = MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
      if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        ierr = DMLabelHasStratum(labelNew, d, &has);CHKERRQ(ierr);
        if (!has) {ierr = DMLabelAddStratum(labelNew, d);CHKERRQ(ierr);}
      }
    }
    ierr = DMAddLabel(dmParallel, labelNew);CHKERRQ(ierr);
    /* Put the output flag in the new label */
    if (hasLabels) {ierr = DMGetLabelOutput(dm, name, &lisOutput);CHKERRQ(ierr);}
    ierr = MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) labelNew, &name);CHKERRQ(ierr);
    ierr = DMSetLabelOutput(dmParallel, name, isOutput);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  PetscBool     *isHybrid, *isHybridParallel;
  PetscInt       dim, depth, d;
  PetscInt       pStart, pEnd, pStartP, pEndP;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm,&pStart,&pEnd);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dmParallel,&pStartP,&pEndP);CHKERRQ(ierr);
  ierr = PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= depth; d++) {
    PetscInt hybridMax = (depth == 1 && d == 1) ? mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

    if (hybridMax >= 0) {
      PetscInt sStart, sEnd, p;

      ierr = DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);CHKERRQ(ierr);
      for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
    }
  }
  ierr = PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
  for (d = 0; d <= depth; d++) {
    PetscInt sStart, sEnd, p, dd;

    ierr = DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);CHKERRQ(ierr);
    dd = (depth == 1 && d == 1) ? dim : d;
    for (p = sStart; p < sEnd; p++) {
      if (isHybridParallel[p-pStartP]) {
        pmesh->hybridPointMax[dd] = p;
        break;
      }
    }
  }
  ierr = PetscFree2(isHybrid,isHybridParallel);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm       comm;
  DM             refTree;
  PetscSection   origParentSection, newParentSection;
  PetscInt      *origParents, *origChildIDs;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);

  /* Set up tree */
  ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr);
  ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr);
  ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr);
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr);
    ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr);
    ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr);
    ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr);
    ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr);
    ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr);
    if (original) {
      PetscInt numParents;

      ierr = PetscSectionGetStorageSize(origParentSection,&numParents);CHKERRQ(ierr);
      ierr = PetscMalloc1(numParents,&globParents);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);CHKERRQ(ierr);
    } else {
      globParents = origParents;
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    if (original) {
      ierr = PetscFree(globParents);CHKERRQ(ierr);
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    {
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);}
      }
      if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
#endif
    ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr);
    if (flg) {
      ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr);
    }
    ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr);
    ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr);
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt    rank, size;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr);
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use it */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr);
    ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr);
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank != rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr);
    ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr);
  }
  {
    PetscBool useCone, useClosure, useAnchors;

    ierr = DMGetBasicAdjacency(dm, &useCone, &useClosure);CHKERRQ(ierr);
    ierr = DMSetBasicAdjacency(dmParallel, useCone, useClosure);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

  Input Parameters:
+ dm - The DMPlex object
- flg - Balance the partition?

  Level: intermediate

.seealso: DMPlexDistribute(), DMPlexGetPartitionBalance()
@*/
PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  mesh->partitionBalance = flg;
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

  Input Parameter:
. dm - The DMPlex object

  Output Parameter:
. flg - Balance the partition?

  Level: intermediate

.seealso: DMPlexDistribute(), DMPlexSetPartitionBalance()
@*/
PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(flg, 2);
  *flg = mesh->partitionBalance;
  PetscFunctionReturn(0);
}
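
/* Example (illustrative sketch): requesting balanced ownership of shared
   points before distributing, so that the highest-rank-wins vote used when
   building the point SF does not pile shared points onto the largest ranks.

     DM dmDist = NULL;

     ierr = DMPlexSetPartitionBalance(dm, PETSC_TRUE);CHKERRQ(ierr);
     ierr = DMPlexDistribute(dm, 0, NULL, &dmDist);CHKERRQ(ierr); */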
/*@C
  DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

  Input Parameters:
+ dm - The source DMPlex object
. migrationSF - The star forest that describes the parallel point remapping
- ownership - Flag causing a vote to determine point ownership

  Output Parameter:
. pointSF - The star forest describing the point overlap in the remapped DM

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank, size;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  Vec                shifts;
  const PetscInt     numShifts  = 13759;
  const PetscScalar *shift      = NULL;
  const PetscBool    shiftDebug = PETSC_FALSE;
  PetscBool          balance;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(DMPLEX_CreatePointSF,dm,0,0,0);CHKERRQ(ierr);

  ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr);
  if (ownership) {
    /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
    if (balance) {
      PetscRandom r;

      ierr = PetscRandomCreate(PETSC_COMM_SELF, &r);CHKERRQ(ierr);
      ierr = PetscRandomSetInterval(r, 0, 2467*size);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF, &shifts);CHKERRQ(ierr);
      ierr = VecSetSizes(shifts, numShifts, numShifts);CHKERRQ(ierr);
      ierr = VecSetType(shifts, VECSTANDARD);CHKERRQ(ierr);
      ierr = VecSetRandom(shifts, r);CHKERRQ(ierr);
      ierr = PetscRandomDestroy(&r);CHKERRQ(ierr);
      ierr = VecGetArrayRead(shifts, &shift);CHKERRQ(ierr);
    }

    /* Point ownership vote: Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      if (shiftDebug) {
        ierr = PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);CHKERRQ(ierr);
      }
      /* Either put in a bid or we know we own it */
      leafNodes[p].rank  = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size;
      leafNodes[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootNodes[p].rank  = -3;
      rootNodes[p].index = -3;
    }
    ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr);
    if (balance) {
      /* We've voted, now we need to get the rank. When we're balancing the partition, the "rank" in rootNodes is not
         the rank but rather (rank + random)%size. So we do another reduction, voting the same way, but sending the
         rank instead of the index. */
      PetscSFNode *rootRanks = NULL;
      ierr = PetscMalloc1(nroots, &rootRanks);CHKERRQ(ierr);
      for (p = 0; p < nroots; p++) {
        rootRanks[p].rank  = -3;
        rootRanks[p].index = -3;
      }
      for (p = 0; p < nleaves; p++) leafNodes[p].index = rank;
      ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr);
      ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr);
      for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index;
      ierr = PetscFree(rootRanks);CHKERRQ(ierr);
    }
  } else {
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank  = rank;
    }
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);

  for (npointLeaves = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) npointLeaves++;
  }
  ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr);
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      pointLocal[idx]  = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  if (shift) {
    ierr = VecRestoreArrayRead(shifts, &shift);CHKERRQ(ierr);
    ierr = VecDestroy(&shifts);CHKERRQ(ierr);
  }
  if (shiftDebug) {ierr = PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);CHKERRQ(ierr);}
  ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_CreatePointSF,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
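/* Usage sketch (illustrative, hypothetical locals): after migrating a mesh over
   sfMigration, derive the shared-point SF with an ownership vote (PETSC_TRUE) and
   attach it to the new mesh. Passing PETSC_FALSE instead keeps ownership with the
   sending roots, as DMPlexDistributeOverlap() and DMPlexGetRedundantDM() do below.

     PetscSF sfPoint;
     ierr = DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
     ierr = DMSetPointSF(dmParallel, sfPoint);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
*/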
/*@C
  DMPlexMigrate - Migrates internal DM data over the supplied star forest

  Collective on DM and PetscSF

  Input Parameters:
+ dm - The source DMPlex object
- sf - The star forest communication context describing the migration pattern

  Output Parameter:
. targetDM - The target DMPlex object

  Level: intermediate

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
{
  MPI_Comm               comm;
  PetscInt               dim, cdim, nroots;
  PetscSF                sfPoint;
  ISLocalToGlobalMapping ltogMigration;
  ISLocalToGlobalMapping ltogOriginal = NULL;
  PetscBool              flg;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr);
  ierr = DMGetCoordinateDim(dm, &cdim);CHKERRQ(ierr);
  ierr = DMSetCoordinateDim(targetDM, cdim);CHKERRQ(ierr);

  /* Check for a one-to-all distribution pattern */
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr);
  if (nroots >= 0) {
    IS        isOriginal;
    PetscInt  n, size, nleaves;
    PetscInt *numbering_orig, *numbering_new;

    /* Get the original point numbering */
    ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr);
    /* Convert to positive global numbers */
    for (n = 0; n < size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
    /* Derive the new local-to-global mapping from the old one */
    ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr);
    ierr = ISDestroy(&isOriginal);CHKERRQ(ierr);
  } else {
    /* One-to-all distribution pattern: We can derive LToG from SF */
    ierr = ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);CHKERRQ(ierr);
  }
  ierr = PetscOptionsHasName(((PetscObject) dm)->options, ((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr);
  }
  /* Migrate DM data to target DM */
  ierr = DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&ltogOriginal);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&ltogMigration);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
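/* Usage sketch (illustrative, hypothetical sizes and names): migrate a mesh so that
   every rank pulls all numPoints points from rank 0. This mirrors the one-to-all
   migration SF that DMPlexGetRedundantDM() builds internally below.

     PetscSF      sfMigration;
     PetscSFNode *remote;
     PetscInt     p;
     DM           dmTarget;
     ierr = PetscMalloc1(numPoints, &remote);CHKERRQ(ierr);
     for (p = 0; p < numPoints; ++p) {remote[p].rank = 0; remote[p].index = p;}
     ierr = PetscSFCreate(comm, &sfMigration);CHKERRQ(ierr);
     ierr = PetscSFSetGraph(sfMigration, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER);CHKERRQ(ierr);
     ierr = DMPlexCreate(comm, &dmTarget);CHKERRQ(ierr);
     ierr = DMPlexMigrate(dm, sfMigration, dmTarget);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);
*/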
PETSC_INTERN PetscErrorCode DMPlexPartitionLabelClosure_Private(DM,DMLabel,PetscInt,PetscInt,const PetscInt[],IS*);

/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Collective on DM

  Input Parameters:
+ dm - The original DMPlex object
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution, or NULL if not needed
- dmParallel - The distributed DMPlex object

  Notes:
  If the mesh was not distributed, the output dmParallel will be NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
@*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm         comm;
  PetscPartitioner partitioner;
  IS               cellPart;
  PetscSection     cellPartSection;
  DM               dmCoord;
  DMLabel          lblPartition, lblMigration;
  PetscSF          sfMigration, sfStratified, sfPoint;
  PetscBool        flg, balance;
  PetscMPIInt      rank, size;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidLogicalCollectiveInt(dm, overlap, 2);
  if (sf) PetscValidPointer(sf,3);
  PetscValidPointer(dmParallel,4);

  if (sf) *sf = NULL;
  *dmParallel = NULL;
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  /* Create cell partition */
  ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr);
  ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr);
  ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(DMPLEX_PartSelf,dm,0,0,0);CHKERRQ(ierr);
  {
    /* Convert partition to DMLabel */
    IS              is;
    PetscHSetI      ht;
    PetscInt        pStart, pEnd, proc, npoints, poff = 0, nranks, *iranks;
    const PetscInt *points;

    ierr = DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);CHKERRQ(ierr);
    /* Preallocate strata */
    ierr = PetscHSetICreate(&ht);CHKERRQ(ierr);
    ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (proc = pStart; proc < pEnd; proc++) {
      ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr);
      if (npoints) {ierr = PetscHSetIAdd(ht, proc);CHKERRQ(ierr);}
    }
    ierr = PetscHSetIGetSize(ht, &nranks);CHKERRQ(ierr);
    ierr = PetscMalloc1(nranks, &iranks);CHKERRQ(ierr);
    ierr = PetscHSetIGetElems(ht, &poff, iranks);CHKERRQ(ierr);
    ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr);
    ierr = DMLabelAddStrata(lblPartition, nranks, iranks);CHKERRQ(ierr);
    ierr = PetscFree(iranks);CHKERRQ(ierr);
    /* Inline DMPlexPartitionLabelClosure() */
    ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr);
    ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (proc = pStart; proc < pEnd; proc++) {
      ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr);
      if (!npoints) continue;
      ierr = PetscSectionGetOffset(cellPartSection, proc, &poff);CHKERRQ(ierr);
      ierr = DMPlexPartitionLabelClosure_Private(dm, lblPartition, proc, npoints, points+poff, &is);CHKERRQ(ierr);
      ierr = DMLabelSetStratumIS(lblPartition, proc, is);CHKERRQ(ierr);
      ierr = ISDestroy(&is);CHKERRQ(ierr);
    }
    ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_PartSelf,dm,0,0,0);CHKERRQ(ierr);
  ierr = DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr);
  /* Stratify the SF in case we are migrating an already parallel plex */
  ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);
  sfMigration = sfStratified;
  ierr = PetscSFSetUp(sfMigration);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(((PetscObject) dm)->options, ((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr);

  /* Build the point SF without overlap */
  ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr);
  ierr = DMPlexSetPartitionBalance(*dmParallel, balance);CHKERRQ(ierr);
  ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves;
    PetscSFNode       *newRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;

    /* Add the partition overlap to the distributed DM */
    ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr);
    ierr = DMDestroy(dmParallel);CHKERRQ(ierr);
    *dmParallel = dmOverlap;
    if (flg) {
      ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr);
      ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr);
    }

    /* Re-map the migration SF to establish the full migration pattern */
    ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr);
    ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr);
  ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr);
  ierr = ISDestroy(&cellPart);CHKERRQ(ierr);
  /* Copy BC */
  ierr = DMCopyBoundary(dm, *dmParallel);CHKERRQ(ierr);
  /* Create sfNatural */
  if (dm->useNatural) {
    PetscSection section;

    ierr = DMGetSection(dm, &section);CHKERRQ(ierr);
    ierr = DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);CHKERRQ(ierr);
    ierr = DMSetUseNatural(*dmParallel, PETSC_TRUE);CHKERRQ(ierr);
  }
  /* Cleanup */
  if (sf) {*sf = sfMigration;}
  else    {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
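/* Usage sketch (illustrative): the common distribution idiom. The original serial
   mesh is replaced by the distributed one; on a single rank dmDist stays NULL and
   the original mesh is kept.

     DM dmDist = NULL;
     ierr = DMPlexDistribute(dm, 0, NULL, &dmDist);CHKERRQ(ierr);
     if (dmDist) {
       ierr = DMDestroy(&dm);CHKERRQ(ierr);
       dm   = dmDist;
     }
*/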
/*@C
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

  Collective on DM

  Input Parameters:
+ dm - The non-overlapping distributed DMPlex object
- overlap - The overlap of partitions

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmOverlap - The overlapping distributed DMPlex object, or NULL

  Notes:
  If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMSetAdjacency()
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  DM             dmCoord;
  DMLabel        lblOverlap;
  PetscSF        sfOverlap, sfStratified, sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  if (sf) *sf = NULL;
  *dmOverlap = NULL;
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  /* Compute point overlap with neighbouring processes on the distributed DM */
  ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr);
  /* Convert overlap label to stratified migration SF */
  ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr);
  ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
  sfOverlap = sfStratified;
  ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr);

  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr);

  /* Build the overlapping DM */
  ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr);
  /* Build the new point SF */
  ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  /* Cleanup overlap partition */
  ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr);
  if (sf) *sf = sfOverlap;
  else    {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);}
  ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
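/* Usage sketch (illustrative): grow an already distributed mesh by one layer of
   overlap cells, replacing the original with the overlapping mesh as in the
   DMPlexDistribute() idiom above.

     DM dmOver = NULL;
     ierr = DMPlexDistributeOverlap(dm, 1, NULL, &dmOver);CHKERRQ(ierr);
     if (dmOver) {
       ierr = DMDestroy(&dm);CHKERRQ(ierr);
       dm   = dmOver;
     }
*/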
/*@C
  DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
  root process of the original's communicator.

  Collective on DM

  Input Parameter:
. dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- gatherMesh - the gathered DM object, or NULL

  Level: intermediate

.keywords: mesh
.seealso: DMPlexDistribute(), DMPlexGetRedundantDM()
@*/
PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
{
  MPI_Comm         comm;
  PetscMPIInt      size;
  PetscPartitioner oldPart, gatherPart;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(gatherMesh,3);
  *gatherMesh = NULL;
  if (sf) *sf = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);
  /* Temporarily swap in the gather partitioner, distribute, then restore */
  ierr = DMPlexGetPartitioner(dm,&oldPart);CHKERRQ(ierr);
  ierr = PetscObjectReference((PetscObject)oldPart);CHKERRQ(ierr);
  ierr = PetscPartitionerCreate(comm,&gatherPart);CHKERRQ(ierr);
  ierr = PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);CHKERRQ(ierr);
  ierr = DMPlexSetPartitioner(dm,gatherPart);CHKERRQ(ierr);
  ierr = DMPlexDistribute(dm,0,sf,gatherMesh);CHKERRQ(ierr);

  ierr = DMPlexSetPartitioner(dm,oldPart);CHKERRQ(ierr);
  ierr = PetscPartitionerDestroy(&gatherPart);CHKERRQ(ierr);
  ierr = PetscPartitionerDestroy(&oldPart);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
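/* Usage sketch (illustrative): gather the mesh onto rank 0, e.g. for serial output.
   On a single rank the output stays NULL; PETSc destroy routines accept NULL objects,
   so the cleanup below is safe either way.

     DM      gatherDM = NULL;
     PetscSF gatherSF = NULL;
     ierr = DMPlexGetGatherDM(dm, &gatherSF, &gatherDM);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&gatherSF);CHKERRQ(ierr);
     ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
*/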
/*@C
  DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

  Collective on DM

  Input Parameter:
. dm - the original DMPlex object

  Output Parameters:
+ sf - the PetscSF used for point distribution (optional)
- redundantMesh - the redundant DM object, or NULL

  Level: intermediate

.keywords: mesh
.seealso: DMPlexDistribute(), DMPlexGetGatherDM()
@*/
PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
{
  MPI_Comm       comm;
  PetscMPIInt    size, rank;
  PetscInt       pStart, pEnd, p;
  PetscInt       numPoints = -1;
  PetscSF        migrationSF, sfPoint, gatherSF;
  DM             gatherDM, dmCoord;
  PetscSFNode   *points;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm,DM_CLASSID,1);
  PetscValidPointer(redundantMesh,3);
  *redundantMesh = NULL;
  comm = PetscObjectComm((PetscObject)dm);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = PetscObjectReference((PetscObject) dm);CHKERRQ(ierr);
    *redundantMesh = dm;
    if (sf) *sf = NULL;
    PetscFunctionReturn(0);
  }
  ierr = DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);CHKERRQ(ierr);
  if (!gatherDM) PetscFunctionReturn(0);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(gatherDM,&pStart,&pEnd);CHKERRQ(ierr);
  numPoints = pEnd - pStart;
  ierr = MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);CHKERRQ(ierr);
  ierr = PetscMalloc1(numPoints,&points);CHKERRQ(ierr);
  ierr = PetscSFCreate(comm,&migrationSF);CHKERRQ(ierr);
  for (p = 0; p < numPoints; p++) {
    points[p].index = p;
    points[p].rank  = 0;
  }
  ierr = PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = DMPlexCreate(comm, redundantMesh);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);CHKERRQ(ierr);
  ierr = DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*redundantMesh, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*redundantMesh, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  if (sf) {
    PetscSF tsf;

    ierr = PetscSFCompose(gatherSF,migrationSF,&tsf);CHKERRQ(ierr);
    ierr = DMPlexStratifyMigrationSF(dm, tsf, sf);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&tsf);CHKERRQ(ierr);
  }
  ierr = PetscSFDestroy(&migrationSF);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&gatherSF);CHKERRQ(ierr);
  ierr = DMDestroy(&gatherDM);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
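/* Usage sketch (illustrative): replicate the whole mesh on every rank, e.g. for
   rank-local post-processing. The result is a new DM (or a reference to dm on a
   single rank) owned by the caller, who must destroy it when done.

     DM dmRedundant = NULL;
     ierr = DMPlexGetRedundantDM(dm, NULL, &dmRedundant);CHKERRQ(ierr);
     ierr = DMDestroy(&dmRedundant);CHKERRQ(ierr);
*/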