#include <petsc/private/dmpleximpl.h>   /*I      "petscdmplex.h"   I*/
#include <petsc/private/dmlabelimpl.h>  /*I      "petscdmlabel.h"  I*/

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseCone"
/*@
  DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

  Input Parameters:
+ dm - The DM object
- useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$ FEM: Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Stored on the Plex; consulted by DMPlexGetAdjacency() to pick the traversal */
  mesh->useCone = useCone;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseCone"
/*@
  DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$ FEM: Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useCone, 2);
  *useCone = mesh->useCone;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseClosure"
/*@
  DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

  Input Parameters:
+ dm - The DM object
- useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$ FEM: Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Stored on the Plex; consulted by DMPlexGetAdjacency() to pick the traversal */
  mesh->useClosure = useClosure;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseClosure"
/*@
  DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$ FEM: Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useClosure, 2);
  *useClosure = mesh->useClosure;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseAnchors"
/*@
  DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

  Input Parameters:
+ dm - The DM object
- useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Stored on the Plex; consulted by DMPlexGetAdjacency() when substituting anchors */
  mesh->useAnchors = useAnchors;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseAnchors"
/*@
  DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useAnchors, 2);
  *useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency_Cone_Internal"
/* Adjacency for useCone && !useClosure (FVM-style): all points in support(p + cone(p)).
   adjSize is in/out: on entry the capacity of adj[], on exit the number of entries. */
static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  /* c == 0 treats p itself; c > 0 treats the cone points of p */
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt point = !c ?
p : cone[c-1]; 184 const PetscInt *support = NULL; 185 PetscInt supportSize, s, q; 186 187 ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr); 188 ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr); 189 for (s = 0; s < supportSize; ++s) { 190 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) { 191 if (support[s] == adj[q]) break; 192 } 193 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 194 } 195 } 196 *adjSize = numAdj; 197 PetscFunctionReturn(0); 198 } 199 200 #undef __FUNCT__ 201 #define __FUNCT__ "DMPlexGetAdjacency_Support_Internal" 202 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 203 { 204 const PetscInt *support = NULL; 205 PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s; 206 PetscErrorCode ierr; 207 208 PetscFunctionBeginHot; 209 ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr); 210 ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr); 211 for (s = 0; s <= supportSize; ++s) { 212 const PetscInt point = !s ? 
p : support[s-1]; 213 const PetscInt *cone = NULL; 214 PetscInt coneSize, c, q; 215 216 ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr); 217 ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr); 218 for (c = 0; c < coneSize; ++c) { 219 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) { 220 if (cone[c] == adj[q]) break; 221 } 222 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 223 } 224 } 225 *adjSize = numAdj; 226 PetscFunctionReturn(0); 227 } 228 229 #undef __FUNCT__ 230 #define __FUNCT__ "DMPlexGetAdjacency_Transitive_Internal" 231 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[]) 232 { 233 PetscInt *star = NULL; 234 PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s; 235 PetscErrorCode ierr; 236 237 PetscFunctionBeginHot; 238 ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 239 for (s = 0; s < starSize*2; s += 2) { 240 const PetscInt *closure = NULL; 241 PetscInt closureSize, c, q; 242 243 ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 244 for (c = 0; c < closureSize*2; c += 2) { 245 for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) { 246 if (closure[c] == adj[q]) break; 247 } 248 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 249 } 250 ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 251 } 252 ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 253 *adjSize = numAdj; 254 PetscFunctionReturn(0); 255 } 256 257 #undef __FUNCT__ 258 #define __FUNCT__ "DMPlexGetAdjacency_Internal" 259 PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, 
PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[]) 260 { 261 static PetscInt asiz = 0; 262 PetscInt maxAnchors = 1; 263 PetscInt aStart = -1, aEnd = -1; 264 PetscInt maxAdjSize; 265 PetscSection aSec = NULL; 266 IS aIS = NULL; 267 const PetscInt *anchors; 268 PetscErrorCode ierr; 269 270 PetscFunctionBeginHot; 271 if (useAnchors) { 272 ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr); 273 if (aSec) { 274 ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr); 275 maxAnchors = PetscMax(1,maxAnchors); 276 ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr); 277 ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr); 278 } 279 } 280 if (!*adj) { 281 PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd; 282 283 ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr); 284 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 285 ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr); 286 coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1; 287 supportSeries = (maxS > 1) ? 
((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1; 288 asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries); 289 asiz *= maxAnchors; 290 asiz = PetscMin(asiz,pEnd-pStart); 291 ierr = PetscMalloc1(asiz,adj);CHKERRQ(ierr); 292 } 293 if (*adjSize < 0) *adjSize = asiz; 294 maxAdjSize = *adjSize; 295 if (useTransitiveClosure) { 296 ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr); 297 } else if (useCone) { 298 ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 299 } else { 300 ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 301 } 302 if (useAnchors && aSec) { 303 PetscInt origSize = *adjSize; 304 PetscInt numAdj = origSize; 305 PetscInt i = 0, j; 306 PetscInt *orig = *adj; 307 308 while (i < origSize) { 309 PetscInt p = orig[i]; 310 PetscInt aDof = 0; 311 312 if (p >= aStart && p < aEnd) { 313 ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr); 314 } 315 if (aDof) { 316 PetscInt aOff; 317 PetscInt s, q; 318 319 for (j = i + 1; j < numAdj; j++) { 320 orig[j - 1] = orig[j]; 321 } 322 origSize--; 323 numAdj--; 324 ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr); 325 for (s = 0; s < aDof; ++s) { 326 for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) { 327 if (anchors[aOff+s] == orig[q]) break; 328 } 329 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 330 } 331 } 332 else { 333 i++; 334 } 335 } 336 *adjSize = numAdj; 337 ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr); 338 } 339 PetscFunctionReturn(0); 340 } 341 342 #undef __FUNCT__ 343 #define __FUNCT__ "DMPlexGetAdjacency" 344 /*@ 345 DMPlexGetAdjacency - Return all points adjacent to the given point 346 347 Input Parameters: 348 + dm - The DM object 349 . p - The point 350 . 
adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE 351 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize 352 353 Output Parameters: 354 + adjSize - The number of adjacent points 355 - adj - The adjacent points 356 357 Level: advanced 358 359 Notes: The user must PetscFree the adj array if it was not passed in. 360 361 .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator() 362 @*/ 363 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[]) 364 { 365 DM_Plex *mesh = (DM_Plex *) dm->data; 366 PetscErrorCode ierr; 367 368 PetscFunctionBeginHot; 369 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 370 PetscValidPointer(adjSize,3); 371 PetscValidPointer(adj,4); 372 ierr = DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);CHKERRQ(ierr); 373 PetscFunctionReturn(0); 374 } 375 376 #undef __FUNCT__ 377 #define __FUNCT__ "DMPlexCreateTwoSidedProcessSF" 378 /*@ 379 DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity 380 381 Collective on DM 382 383 Input Parameters: 384 + dm - The DM 385 - sfPoint - The PetscSF which encodes point connectivity 386 387 Output Parameters: 388 + processRanks - A list of process neighbors, or NULL 389 - sfProcess - An SF encoding the two-sided process connectivity, or NULL 390 391 Level: developer 392 393 .seealso: PetscSFCreate(), DMPlexCreateProcessSF() 394 @*/ 395 PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess) 396 { 397 const PetscSFNode *remotePoints; 398 PetscInt *localPointsNew; 399 PetscSFNode *remotePointsNew; 400 const PetscInt *nranks; 401 PetscInt *ranksNew; 402 PetscBT neighbors; 403 PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n; 404 
PetscMPIInt numProcs, proc, rank; 405 PetscErrorCode ierr; 406 407 PetscFunctionBegin; 408 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 409 PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2); 410 if (processRanks) {PetscValidPointer(processRanks, 3);} 411 if (sfProcess) {PetscValidPointer(sfProcess, 4);} 412 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);CHKERRQ(ierr); 413 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 414 ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr); 415 ierr = PetscBTCreate(numProcs, &neighbors);CHKERRQ(ierr); 416 ierr = PetscBTMemzero(numProcs, neighbors);CHKERRQ(ierr); 417 /* Compute root-to-leaf process connectivity */ 418 ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr); 419 ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr); 420 for (p = pStart; p < pEnd; ++p) { 421 PetscInt ndof, noff, n; 422 423 ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr); 424 ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr); 425 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 426 } 427 ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr); 428 /* Compute leaf-to-neighbor process connectivity */ 429 ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr); 430 ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr); 431 for (p = pStart; p < pEnd; ++p) { 432 PetscInt ndof, noff, n; 433 434 ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr); 435 ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr); 436 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);} 437 } 438 ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr); 439 /* Compute leaf-to-root process connectivity */ 440 for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);} 441 /* Calculate edges */ 442 
PetscBTClear(neighbors, rank); 443 for(proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;} 444 ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr); 445 ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr); 446 ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr); 447 for(proc = 0, n = 0; proc < numProcs; ++proc) { 448 if (PetscBTLookup(neighbors, proc)) { 449 ranksNew[n] = proc; 450 localPointsNew[n] = proc; 451 remotePointsNew[n].index = rank; 452 remotePointsNew[n].rank = proc; 453 ++n; 454 } 455 } 456 ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr); 457 if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);} 458 else {ierr = PetscFree(ranksNew);CHKERRQ(ierr);} 459 if (sfProcess) { 460 ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr); 461 ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr); 462 ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr); 463 ierr = PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr); 464 } 465 PetscFunctionReturn(0); 466 } 467 468 #undef __FUNCT__ 469 #define __FUNCT__ "DMPlexDistributeOwnership" 470 /*@ 471 DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF. 472 473 Collective on DM 474 475 Input Parameter: 476 . dm - The DM 477 478 Output Parameters: 479 + rootSection - The number of leaves for a given root point 480 . rootrank - The rank of each edge into the root point 481 . 
leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Level: developer

.seealso: DMPlexCreateOverlap()
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  /* Compute number of leaves for each root */
  ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr);
  ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);}
  ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr);
  /* Gather rank of each leaf to root: every local point sends this rank,
     so each root collects the rank of every process that holds it as a leaf */
  ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr);
  ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr);
  ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr);
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscFree(myrank);CHKERRQ(ierr);
  /* rootrank takes ownership of remoterank (PETSC_OWN_POINTER) */
  ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr);
  /* Distribute remote ranks to leaves */
  ierr = PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr);
  ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlap"
/*@C
  DMPlexCreateOverlap - Compute a label marking the points each process must donate to its neighbors to build the requested number of overlap levels.

  Collective on DM

  Input Parameters:
+ dm - The DM
. levels - Number of overlap levels
. rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Output Parameter:
. ovLabel - DMLabel containing remote overlap contributions as point/rank pairings

  Level: developer

.seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  MPI_Comm           comm;
  DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
  PetscSF            sfPoint, sfProc;
  const PetscSFNode *remote;
  const PetscInt    *local;
  const PetscInt    *nrank, *rrank;
  PetscInt          *adj = NULL;
  PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
  PetscMPIInt        rank, numProcs;
  PetscBool          useCone, useClosure, flg;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr);
  ierr = DMLabelCreate("Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr);
  /* Handle leaves: shared with the root point */
  for (l = 0; l < nleaves; ++l) {
    PetscInt adjSize = PETSC_DETERMINE, a;

    /* adj is allocated on the first call and reused afterwards */
    ierr = DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);CHKERRQ(ierr);
    for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);}
  }
  ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Handle roots */
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

    if ((p >= sStart) && (p < sEnd)) {
      /* Some leaves share a root with other leaves on different processes */
      ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr);
      if (neighbors) {
        ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr);
        ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
        for (n = 0; n < neighbors; ++n) {
          const PetscInt remoteRank = nrank[noff+n];

          if (remoteRank == rank) continue;
          for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
        }
      }
    }
    /* Roots are shared with leaves */
    ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr);
    if (!neighbors) continue;
    ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
    for (n = 0; n < neighbors; ++n) {
      const PetscInt remoteRank = rrank[noff+n];

      if (remoteRank == rank) continue;
      for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Add additional overlap levels */
  for (l = 1; l < levels; l++) {
    /* Propagate point donations over SF to capture remote connections */
    ierr = DMPlexPartitionLabelPropagate(dm, ovAdjByRank);CHKERRQ(ierr);
    /* Add next level of point donations to the label */
    ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr);
  }
  /* We require the closure in the overlap */
  ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
  if (useCone || !useClosure) {
    ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr);
  }
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }
  /* Make global process SF and invert sender to receiver label */
  {
    /* Build a global process SF: every process has every rank as a leaf */
    PetscSFNode *remoteProc;
    ierr = PetscMalloc1(numProcs, &remoteProc);CHKERRQ(ierr);
    for (p = 0; p < numProcs; ++p) {
      remoteProc[p].rank  = p;
      remoteProc[p].index = rank;
    }
    ierr = PetscSFCreate(comm, &sfProc);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) sfProc, "Process SF");CHKERRQ(ierr);
    ierr = PetscSFSetGraph(sfProc, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr);
  }
  ierr = DMLabelCreate("Overlap label", ovLabel);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);CHKERRQ(ierr);
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);}
  for (l = 0; l < nleaves; ++l) {
    ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr);
    ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr);
  }
  /* Clean up */
  ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfProc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlapMigrationSF"
/* Build the SF that migrates points (including the received overlap) to their new
   local numbering, keeping strata contiguous. */
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);

  /* Count received points in each stratum and compute the internal strata shift */
  ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] =
0;
  /* Plex stratum order in memory is cells, vertices, faces, edges; shift each
     stratum past the received points of the strata stored before it */
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Build an SF that describes the full overlap migration */
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  newLeaves = pEnd - pStart + nleaves;
  ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr);
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points, appended at the running index of
     their stratum (depthIdx was advanced past all local points above) */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);

  /* The SF takes ownership of ilocal and iremote (PETSC_OWN_POINTER) */
  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr);

  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexStratifyMigrationSF"
/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification.

  Input Parameter:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Level: developer

.seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscInt           hybEnd[4];
  const PetscSFNode *iremote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr);
  ierr = MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
  if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

  /* Before building the migration SF we need
to know the new stratum offsets */ 790 ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr); 791 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 792 ierr = DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);CHKERRQ(ierr); 793 for (d = 0; d < depth+1; ++d) { 794 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 795 for (p = pStart; p < pEnd; ++p) { 796 if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */ 797 pointDepths[p] = 2 * d; 798 } else { 799 pointDepths[p] = 2 * d + 1; 800 } 801 } 802 } 803 for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1; 804 ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 805 ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 806 /* Count recevied points in each stratum and compute the internal strata shift */ 807 ierr = PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);CHKERRQ(ierr); 808 for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0; 809 for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++; 810 depthShift[2*depth+1] = 0; 811 for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1]; 812 for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth]; 813 depthShift[0] += depthRecv[1]; 814 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1]; 815 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0]; 816 for (d = 2 * depth-1; d > 2; --d) { 817 PetscInt e; 818 819 for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d]; 820 } 821 for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;} 822 /* Derive a new local permutation based on stratified indices */ 823 ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr); 824 for (p = 0; p < nleaves; ++p) { 825 const PetscInt dep = remoteDepths[p]; 826 827 ilocal[p] = depthShift[dep] + depthIdx[dep]; 828 depthIdx[dep]++; 829 } 830 
  /* Hand over the stratified permutation (owned); iremote still belongs to
     the input sf, so it is copied */
  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr);
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);
  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeField"
/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalVec - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newVec - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscScalar   *originalValues, *newValues;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  /* Migrate the section layout first, then size the output vector from it */
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr);

  ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr);
  ierr =
PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  /* Push dof values from the original layout into the new one */
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeFieldIS"
/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalIS - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newIS - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
{
  PetscSF         fieldSF;
  PetscInt       *newValues, *remoteOffsets, fieldSize;
  const PetscInt *originalValues;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr);

  ierr = ISGetIndices(originalIS,
&originalValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  /* The cast drops const; the SF only reads from the root array */
  ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr);
  /* The new IS takes ownership of newValues (PETSC_OWN_POINTER) */
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeData"
/*@
  DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
.
datatype - The type of data
- originalData - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newData - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField()
@*/
PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscMPIInt    dataSize;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  /* Allocate the receive buffer in raw bytes: #dofs times the datatype size */
  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr);
  ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr);

  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeCones"
/* Migrate cone data to the parallel DM, translating cone points through the
   given local-to-global (original) and global-to-local (renumbering) maps */
PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm        comm;
  PetscSF         coneSF;
  PetscSection    originalConeSection, newConeSection;
  PetscInt       *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
PetscBool flg; 991 PetscErrorCode ierr; 992 993 PetscFunctionBegin; 994 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 995 PetscValidPointer(dmParallel,4); 996 ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr); 997 998 /* Distribute cone section */ 999 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1000 ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr); 1001 ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr); 1002 ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr); 1003 ierr = DMSetUp(dmParallel);CHKERRQ(ierr); 1004 { 1005 PetscInt pStart, pEnd, p; 1006 1007 ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr); 1008 for (p = pStart; p < pEnd; ++p) { 1009 PetscInt coneSize; 1010 ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr); 1011 pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize); 1012 } 1013 } 1014 /* Communicate and renumber cones */ 1015 ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr); 1016 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 1017 ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr); 1018 if (original) { 1019 PetscInt numCones; 1020 1021 ierr = PetscSectionGetStorageSize(originalConeSection,&numCones);CHKERRQ(ierr); ierr = PetscMalloc1(numCones,&globCones);CHKERRQ(ierr); 1022 ierr = ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);CHKERRQ(ierr); 1023 } 1024 else { 1025 globCones = cones; 1026 } 1027 ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr); 1028 ierr = PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr); 1029 ierr = PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr); 1030 if (original) { 1031 ierr = PetscFree(globCones);CHKERRQ(ierr); 1032 } 1033 ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr); 
  /* Map migrated global cone entries back to local numbering on the receiver */
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
#if PETSC_USE_DEBUG
  {
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      /* IS_GTOLM_MASK marks unmapped points with a negative value */
      if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);}
    }
    if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
#endif
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  /* Reuse the same SF to migrate cone orientations (no renumbering needed) */
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify sieve */
  {
    PetscInt pStart, pEnd;

    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr);
  /* Carry the adjacency definition over to the parallel mesh */
  pmesh->useCone    = mesh->useCone;
  pmesh->useClosure = mesh->useClosure;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define
__FUNCT__ "DMPlexDistributeCoordinates"
/* Migrate the coordinate section and local coordinate vector to the parallel
   DM, carrying over periodicity information when present */
PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm              comm;
  PetscSection          originalCoordSection, newCoordSection;
  Vec                   originalCoordinates, newCoordinates;
  PetscInt              bs;
  const char           *name;
  const PetscReal      *maxCell, *L;
  const DMBoundaryType *bd;
  PetscErrorCode        ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidPointer(dmParallel, 3);

  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr);
  if (originalCoordinates) {
    /* Local coordinate vectors are sequential on each process */
    ierr = VecCreate(PETSC_COMM_SELF, &newCoordinates);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr);

    ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr);
    ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr);
    ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr);
    ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr);
    ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr);
  }
  /* Propagate periodicity (max cell sizes, domain lengths, boundary types) */
  ierr = DMGetPeriodicity(dm, &maxCell, &L, &bd);CHKERRQ(ierr);
  if (L) {ierr = DMSetPeriodicity(dmParallel, maxCell, L, bd);CHKERRQ(ierr);}
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeLabels"
/* Here we are assuming that process 0 always has everything */
PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex *mesh = (DM_Plex*) dm->data;
  MPI_Comm comm;
DMLabel depthLabel; 1118 PetscMPIInt rank; 1119 PetscInt depth, d, numLabels, numLocalLabels, l; 1120 PetscBool hasLabels = PETSC_FALSE, lsendDepth, sendDepth; 1121 PetscObjectState depthState = -1; 1122 PetscErrorCode ierr; 1123 1124 PetscFunctionBegin; 1125 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1126 PetscValidHeaderSpecific(dm, DM_CLASSID, 3); 1127 ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr); 1128 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1129 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1130 1131 /* If the user has changed the depth label, communicate it instead */ 1132 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 1133 ierr = DMPlexGetDepthLabel(dm, &depthLabel);CHKERRQ(ierr); 1134 if (depthLabel) {ierr = DMLabelGetState(depthLabel, &depthState);CHKERRQ(ierr);} 1135 lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE; 1136 ierr = MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);CHKERRQ(ierr); 1137 if (sendDepth) { 1138 ierr = DMRemoveLabel(dmParallel, "depth", &depthLabel);CHKERRQ(ierr); 1139 ierr = DMLabelDestroy(&depthLabel);CHKERRQ(ierr); 1140 } 1141 /* Everyone must have either the same number of labels, or none */ 1142 ierr = DMGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr); 1143 numLabels = numLocalLabels; 1144 ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr); 1145 if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE; 1146 for (l = numLabels-1; l >= 0; --l) { 1147 DMLabel label = NULL, labelNew = NULL; 1148 PetscBool isdepth; 1149 1150 if (hasLabels) { 1151 ierr = DMGetLabelByNum(dm, l, &label);CHKERRQ(ierr); 1152 /* Skip "depth" because it is recreated */ 1153 ierr = PetscStrcmp(label->name, "depth", &isdepth);CHKERRQ(ierr); 1154 } 1155 ierr = MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr); 1156 if (isdepth && !sendDepth) continue; 1157 ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr); 1158 if 
(isdepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      ierr = MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
      if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        ierr = DMLabelHasStratum(labelNew, d, &has);CHKERRQ(ierr);
        if (!has) ierr = DMLabelAddStratum(labelNew, d);CHKERRQ(ierr);
      }
    }
    ierr = DMAddLabel(dmParallel, labelNew);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeSetupHybrid"
/* Transfer hybrid point bounds to the parallel DM: flag hybrid points on the
   source, migrate the flags, then record the first hybrid point per stratum */
PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex       *mesh  = (DM_Plex*) dm->data;
  DM_Plex       *pmesh = (DM_Plex*) (dmParallel)->data;
  PetscBool     *isHybrid, *isHybridParallel;
  PetscInt       dim, depth, d;
  PetscInt       pStart, pEnd, pStartP, pEndP;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidPointer(dmParallel, 4);

  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm,&pStart,&pEnd);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dmParallel,&pStartP,&pEndP);CHKERRQ(ierr);
  ierr = PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= depth; d++) {
    /* For depth-1 (uninterpolated) meshes, stratum 1 uses the bound stored at dim */
    PetscInt hybridMax = (depth == 1 && d == 1) ?
mesh->hybridPointMax[dim] : mesh->hybridPointMax[d]; 1199 1200 if (hybridMax >= 0) { 1201 PetscInt sStart, sEnd, p; 1202 1203 ierr = DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);CHKERRQ(ierr); 1204 for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE; 1205 } 1206 } 1207 ierr = PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr); 1208 ierr = PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr); 1209 for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1; 1210 for (d = 0; d <= depth; d++) { 1211 PetscInt sStart, sEnd, p, dd; 1212 1213 ierr = DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);CHKERRQ(ierr); 1214 dd = (depth == 1 && d == 1) ? dim : d; 1215 for (p = sStart; p < sEnd; p++) { 1216 if (isHybridParallel[p-pStartP]) { 1217 pmesh->hybridPointMax[dd] = p; 1218 break; 1219 } 1220 } 1221 } 1222 ierr = PetscFree2(isHybrid,isHybridParallel);CHKERRQ(ierr); 1223 PetscFunctionReturn(0); 1224 } 1225 1226 #undef __FUNCT__ 1227 #define __FUNCT__ "DMPlexDistributeSetupTree" 1228 PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel) 1229 { 1230 DM_Plex *mesh = (DM_Plex*) dm->data; 1231 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1232 MPI_Comm comm; 1233 DM refTree; 1234 PetscSection origParentSection, newParentSection; 1235 PetscInt *origParents, *origChildIDs; 1236 PetscBool flg; 1237 PetscErrorCode ierr; 1238 1239 PetscFunctionBegin; 1240 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1241 PetscValidHeaderSpecific(dm, DM_CLASSID, 4); 1242 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1243 1244 /* Set up tree */ 1245 ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr); 1246 ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr); 1247 ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr); 1248 if (origParentSection) { 1249 PetscInt 
pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr);
    ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr);
    ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr);
    ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr);
    ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr);
    ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr);
    if (original) {
      PetscInt numParents;

      /* Translate local parent points to global numbers before communication */
      ierr = PetscSectionGetStorageSize(origParentSection,&numParents);CHKERRQ(ierr);
      ierr = PetscMalloc1(numParents,&globParents);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);CHKERRQ(ierr);
    }
    else {
      globParents = origParents;
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    if (original) {
      ierr = PetscFree(globParents);CHKERRQ(ierr);
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    /* Map migrated global parents back to local numbering */
    ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr);
#if PETSC_USE_DEBUG
    {
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      for (p = 0; p < newParentSize; ++p) {
        /* IS_GTOLM_MASK marks unmapped points with a negative value */
        if (newParents[p] < 0)
{valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);} 1286 } 1287 if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map"); 1288 } 1289 #endif 1290 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr); 1291 if (flg) { 1292 ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr); 1293 ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1294 ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr); 1295 ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1296 ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr); 1297 } 1298 ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr); 1299 ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr); 1300 ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr); 1301 ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr); 1302 } 1303 pmesh->useAnchors = mesh->useAnchors; 1304 PetscFunctionReturn(0); 1305 } 1306 1307 #undef __FUNCT__ 1308 #define __FUNCT__ "DMPlexDistributeSF" 1309 PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel) 1310 { 1311 DM_Plex *mesh = (DM_Plex*) dm->data; 1312 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1313 PetscMPIInt rank, numProcs; 1314 MPI_Comm comm; 1315 PetscErrorCode ierr; 1316 1317 PetscFunctionBegin; 1318 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1319 PetscValidPointer(dmParallel,7); 1320 1321 /* Create point SF for parallel mesh */ 1322 ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1323 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1324 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1325 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 1326 { 1327 const PetscInt *leaves; 1328 PetscSFNode *remotePoints, *rowners, *lowners; 1329 PetscInt numRoots, 
numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr);
    /* Start with unclaimed roots, then let leaves see the current claims */
    for (p=0; p<numRoots; p++) {
      rowners[p].rank  = -1;
      rowners[p].index = -1;
    }
    ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
        lowners[p].rank  = rank;
        lowners[p].index = leaves ? leaves[p] : p;
      } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */
        lowners[p].rank  = -2;
        lowners[p].index = -2;
      }
    }
    for (p=0; p<numRoots; p++) { /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rowners[p].rank  = -3;
      rowners[p].index = -3;
    }
    /* Resolve ownership with a MAXLOC reduction, then tell every leaf the winner */
    ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr);
    /* Leaves owned by another rank become ghost points of the new mesh */
    for (p = 0; p < numLeaves; ++p) {
      if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed");
      if (lowners[p].rank != rank) ++numGhostPoints;
    }
    ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr);
    ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr);
    for (p = 0, gp = 0; p < numLeaves; ++p) {
      if (lowners[p].rank
!= rank) {
        ghostPoints[gp]        = leaves ? leaves[p] : p;
        remotePoints[gp].rank  = lowners[p].rank;
        remotePoints[gp].index = lowners[p].index;
        ++gp;
      }
    }
    ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr);
    ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr);
  }
  /* Carry the adjacency definition over to the parallel mesh */
  pmesh->useCone    = mesh->useCone;
  pmesh->useClosure = mesh->useClosure;
  ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreatePointSF"
/*@C
  DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

  Input Parameters:
+ dm - The source DMPlex object
. migrationSF - The star forest that describes the parallel point remapping
- ownership - Flag causing a vote to determine point ownership

  Output Parameter:
. pointSF - The star forest describing the point overlap in the remapped DM

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
{
  PetscMPIInt        rank;
  PetscInt           p, nroots, nleaves, idx, npointLeaves;
  PetscInt          *pointLocal;
  const PetscInt    *leaves;
  const PetscSFNode *roots;
  PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);

  ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr);
  if (ownership) {
    /* Point ownership vote:
       Process with highest rank owns shared points */
    for (p = 0; p < nleaves; ++p) {
      /* Either put in a bid or we know we own it */
      leafNodes[p].rank  = rank;
      leafNodes[p].index = p;
    }
    for (p = 0; p < nroots; p++) {
      /* Root must not participate in the reduction, flag so that MAXLOC does not use */
      rootNodes[p].rank  = -3;
      rootNodes[p].index = -3;
    }
    ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr);
  } else {
    /* Without a vote, the sending rank keeps ownership of its points */
    for (p = 0; p < nroots; p++) {
      rootNodes[p].index = -1;
      rootNodes[p].rank  = rank;
    };
    for (p = 0; p < nleaves; p++) {
      /* Write new local id into old location */
      if (roots[p].rank == rank) {
        rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
      }
    }
  }
  ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr);

  /* Leaves owned by another rank become ghost points of the new point SF */
  for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;}
  ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr);
  for (idx = 0, p = 0; p < nleaves; p++) {
    if (leafNodes[p].rank != rank) {
      pointLocal[idx]  = p;
      pointRemote[idx] = leafNodes[p];
      idx++;
    }
  }
  ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexMigrate"
/*@C
  DMPlexMigrate -
Migrates internal DM data over the supplied star forest

  Input Parameters:
+ dm - The source DMPlex object
- sf - The star forest communication context describing the migration pattern

  Output Parameter:
. targetDM - The target DMPlex object

  Level: intermediate

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
{
  MPI_Comm               comm;
  PetscInt               dim, nroots;
  PetscSF                sfPoint;
  ISLocalToGlobalMapping ltogMigration;
  ISLocalToGlobalMapping ltogOriginal = NULL;
  PetscBool              flg;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr);

  /* Check for a one-to-all distribution pattern */
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr);
  if (nroots >= 0) {
    IS        isOriginal;
    PetscInt  n, size, nleaves;
    PetscInt *numbering_orig, *numbering_new;
    /* Get the original point numbering */
    ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr);
    /* Convert to positive global numbers */
    /* NOTE(review): this writes through the const array returned by
       GetBlockIndices; the array is restored afterwards -- confirm this
       mutation of the mapping's indices is intended */
    for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
    /* Derive the new local-to-global mapping from the old one */
    ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr);
    ierr
= PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr); 1511 ierr = PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1512 ierr = PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1513 ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);CHKERRQ(ierr); 1514 ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1515 ierr = ISDestroy(&isOriginal);CHKERRQ(ierr); 1516 } else { 1517 /* One-to-all distribution pattern: We can derive LToG from SF */ 1518 ierr = ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);CHKERRQ(ierr); 1519 } 1520 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1521 if (flg) { 1522 ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr); 1523 ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr); 1524 } 1525 /* Migrate DM data to target DM */ 1526 ierr = DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1527 ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr); 1528 ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr); 1529 ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1530 ierr = DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1531 ierr = ISLocalToGlobalMappingDestroy(<ogOriginal);CHKERRQ(ierr); 1532 ierr = ISLocalToGlobalMappingDestroy(<ogMigration);CHKERRQ(ierr); 1533 ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1534 PetscFunctionReturn(0); 1535 } 1536 1537 #undef __FUNCT__ 1538 #define __FUNCT__ "DMPlexDistribute" 1539 /*@C 1540 DMPlexDistribute - Distributes the mesh and any associated sections. 
1541 1542 Not Collective 1543 1544 Input Parameter: 1545 + dm - The original DMPlex object 1546 - overlap - The overlap of partitions, 0 is the default 1547 1548 Output Parameter: 1549 + sf - The PetscSF used for point distribution 1550 - parallelMesh - The distributed DMPlex object, or NULL 1551 1552 Note: If the mesh was not distributed, the return value is NULL. 1553 1554 The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and 1555 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1556 representation on the mesh. 1557 1558 Level: intermediate 1559 1560 .keywords: mesh, elements 1561 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1562 @*/ 1563 PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) 1564 { 1565 MPI_Comm comm; 1566 PetscPartitioner partitioner; 1567 IS cellPart; 1568 PetscSection cellPartSection; 1569 DM dmCoord; 1570 DMLabel lblPartition, lblMigration; 1571 PetscSF sfProcess, sfMigration, sfStratified, sfPoint; 1572 PetscBool flg; 1573 PetscMPIInt rank, numProcs, p; 1574 PetscErrorCode ierr; 1575 1576 PetscFunctionBegin; 1577 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1578 if (sf) PetscValidPointer(sf,4); 1579 PetscValidPointer(dmParallel,5); 1580 1581 ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1582 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1583 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1584 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 1585 1586 *dmParallel = NULL; 1587 if (numProcs == 1) { 1588 ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1589 PetscFunctionReturn(0); 1590 } 1591 1592 /* Create cell partition */ 1593 ierr = PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1594 ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr); 1595 ierr = 
DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr); 1596 ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr); 1597 { 1598 /* Convert partition to DMLabel */ 1599 PetscInt proc, pStart, pEnd, npoints, poffset; 1600 const PetscInt *points; 1601 ierr = DMLabelCreate("Point Partition", &lblPartition);CHKERRQ(ierr); 1602 ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr); 1603 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1604 for (proc = pStart; proc < pEnd; proc++) { 1605 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1606 ierr = PetscSectionGetOffset(cellPartSection, proc, &poffset);CHKERRQ(ierr); 1607 for (p = poffset; p < poffset+npoints; p++) { 1608 ierr = DMLabelSetValue(lblPartition, points[p], proc);CHKERRQ(ierr); 1609 } 1610 } 1611 ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr); 1612 } 1613 ierr = DMPlexPartitionLabelClosure(dm, lblPartition);CHKERRQ(ierr); 1614 { 1615 /* Build a global process SF */ 1616 PetscSFNode *remoteProc; 1617 ierr = PetscMalloc1(numProcs, &remoteProc);CHKERRQ(ierr); 1618 for (p = 0; p < numProcs; ++p) { 1619 remoteProc[p].rank = p; 1620 remoteProc[p].index = rank; 1621 } 1622 ierr = PetscSFCreate(comm, &sfProcess);CHKERRQ(ierr); 1623 ierr = PetscObjectSetName((PetscObject) sfProcess, "Process SF");CHKERRQ(ierr); 1624 ierr = PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr); 1625 } 1626 ierr = DMLabelCreate("Point migration", &lblMigration);CHKERRQ(ierr); 1627 ierr = DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);CHKERRQ(ierr); 1628 ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr); 1629 /* Stratify the SF in case we are migrating an already parallel plex */ 1630 ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr); 1631 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1632 
sfMigration = sfStratified; 1633 ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1634 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1635 if (flg) { 1636 ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1637 ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1638 } 1639 1640 /* Create non-overlapping parallel DM and migrate internal data */ 1641 ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr); 1642 ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr); 1643 ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr); 1644 1645 /* Build the point SF without overlap */ 1646 ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr); 1647 ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr); 1648 ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr); 1649 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1650 if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);} 1651 1652 if (overlap > 0) { 1653 DM dmOverlap; 1654 PetscInt nroots, nleaves; 1655 PetscSFNode *newRemote; 1656 const PetscSFNode *oldRemote; 1657 PetscSF sfOverlap, sfOverlapPoint; 1658 /* Add the partition overlap to the distributed DM */ 1659 ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr); 1660 ierr = DMDestroy(dmParallel);CHKERRQ(ierr); 1661 *dmParallel = dmOverlap; 1662 if (flg) { 1663 ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr); 1664 ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr); 1665 } 1666 1667 /* Re-map the migration SF to establish the full migration pattern */ 1668 ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr); 1669 ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1670 ierr = 
PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr); 1671 ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1672 ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1673 ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr); 1674 ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1675 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1676 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1677 sfMigration = sfOverlapPoint; 1678 } 1679 /* Cleanup Partition */ 1680 ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr); 1681 ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr); 1682 ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr); 1683 ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr); 1684 ierr = ISDestroy(&cellPart);CHKERRQ(ierr); 1685 /* Copy BC */ 1686 ierr = DMCopyBoundary(dm, *dmParallel);CHKERRQ(ierr); 1687 /* Create sfNatural */ 1688 if (dm->useNatural) { 1689 PetscSection section; 1690 1691 ierr = DMGetDefaultSection(dm, §ion);CHKERRQ(ierr); 1692 ierr = DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);CHKERRQ(ierr); 1693 } 1694 /* Cleanup */ 1695 if (sf) {*sf = sfMigration;} 1696 else {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);} 1697 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1698 ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1699 PetscFunctionReturn(0); 1700 } 1701 1702 #undef __FUNCT__ 1703 #define __FUNCT__ "DMPlexDistributeOverlap" 1704 /*@C 1705 DMPlexDistribute - Add partition overlap to a distributed non-overlapping DM. 
1706 1707 Not Collective 1708 1709 Input Parameter: 1710 + dm - The non-overlapping distrbuted DMPlex object 1711 - overlap - The overlap of partitions, 0 is the default 1712 1713 Output Parameter: 1714 + sf - The PetscSF used for point distribution 1715 - dmOverlap - The overlapping distributed DMPlex object, or NULL 1716 1717 Note: If the mesh was not distributed, the return value is NULL. 1718 1719 The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and 1720 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1721 representation on the mesh. 1722 1723 Level: intermediate 1724 1725 .keywords: mesh, elements 1726 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1727 @*/ 1728 PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap) 1729 { 1730 MPI_Comm comm; 1731 PetscMPIInt rank; 1732 PetscSection rootSection, leafSection; 1733 IS rootrank, leafrank; 1734 DM dmCoord; 1735 DMLabel lblOverlap; 1736 PetscSF sfOverlap, sfStratified, sfPoint; 1737 PetscErrorCode ierr; 1738 1739 PetscFunctionBegin; 1740 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1741 if (sf) PetscValidPointer(sf, 3); 1742 PetscValidPointer(dmOverlap, 4); 1743 1744 ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1745 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1746 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1747 1748 /* Compute point overlap with neighbouring processes on the distributed DM */ 1749 ierr = PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1750 ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr); 1751 ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr); 1752 ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr); 1753 ierr = DMPlexCreateOverlap(dm, overlap, 
rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr); 1754 /* Convert overlap label to stratified migration SF */ 1755 ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr); 1756 ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr); 1757 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1758 sfOverlap = sfStratified; 1759 ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr); 1760 ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr); 1761 1762 ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr); 1763 ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr); 1764 ierr = ISDestroy(&rootrank);CHKERRQ(ierr); 1765 ierr = ISDestroy(&leafrank);CHKERRQ(ierr); 1766 ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1767 1768 /* Build the overlapping DM */ 1769 ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr); 1770 ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr); 1771 ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr); 1772 /* Build the new point SF */ 1773 ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1774 ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr); 1775 ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr); 1776 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1777 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1778 /* Cleanup overlap partition */ 1779 ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr); 1780 if (sf) *sf = sfOverlap; 1781 else {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);} 1782 ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1783 PetscFunctionReturn(0); 1784 } 1785 1786 #undef __FUNCT__ 1787 #define __FUNCT__ "DMPlexGetGatherDM" 1788 /*@C 1789 DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the 1790 root process of the original's communicator. 
1791 1792 Input Parameters: 1793 . dm - the original DMPlex object 1794 1795 Output Parameters: 1796 . gatherMesh - the gathered DM object, or NULL 1797 1798 Level: intermediate 1799 1800 .keywords: mesh 1801 .seealso: DMPlexDistribute(), DMPlexGetRedundantDM() 1802 @*/ 1803 PetscErrorCode DMPlexGetGatherDM(DM dm, DM * gatherMesh) 1804 { 1805 MPI_Comm comm; 1806 PetscMPIInt size; 1807 PetscPartitioner oldPart, gatherPart; 1808 PetscErrorCode ierr; 1809 1810 PetscFunctionBegin; 1811 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 1812 comm = PetscObjectComm((PetscObject)dm); 1813 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1814 *gatherMesh = NULL; 1815 if (size == 1) PetscFunctionReturn(0); 1816 ierr = DMPlexGetPartitioner(dm,&oldPart);CHKERRQ(ierr); 1817 ierr = PetscObjectReference((PetscObject)oldPart);CHKERRQ(ierr); 1818 ierr = PetscPartitionerCreate(comm,&gatherPart);CHKERRQ(ierr); 1819 ierr = PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);CHKERRQ(ierr); 1820 ierr = DMPlexSetPartitioner(dm,gatherPart);CHKERRQ(ierr); 1821 ierr = DMPlexDistribute(dm,0,NULL,gatherMesh);CHKERRQ(ierr); 1822 ierr = DMPlexSetPartitioner(dm,oldPart);CHKERRQ(ierr); 1823 ierr = PetscPartitionerDestroy(&gatherPart);CHKERRQ(ierr); 1824 ierr = PetscPartitionerDestroy(&oldPart);CHKERRQ(ierr); 1825 PetscFunctionReturn(0); 1826 } 1827 1828 #undef __FUNCT__ 1829 #define __FUNCT__ "DMPlexGetRedundantDM" 1830 /*@C 1831 DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process. 1832 1833 Input Parameters: 1834 . dm - the original DMPlex object 1835 1836 Output Parameters: 1837 . 
redundantMesh - the redundant DM object, or NULL 1838 1839 Level: intermediate 1840 1841 .keywords: mesh 1842 .seealso: DMPlexDistribute(), DMPlexGetGatherDM() 1843 @*/ 1844 PetscErrorCode DMPlexGetRedundantDM(DM dm, DM * redundantMesh) 1845 { 1846 MPI_Comm comm; 1847 PetscMPIInt size, rank; 1848 PetscInt pStart, pEnd, p; 1849 PetscInt numPoints = -1; 1850 PetscSF migrationSF, sfPoint; 1851 DM gatherDM, dmCoord; 1852 PetscSFNode *points; 1853 PetscErrorCode ierr; 1854 1855 PetscFunctionBegin; 1856 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 1857 comm = PetscObjectComm((PetscObject)dm); 1858 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1859 *redundantMesh = NULL; 1860 if (size == 1) PetscFunctionReturn(0); 1861 ierr = DMPlexGetGatherDM(dm,&gatherDM);CHKERRQ(ierr); 1862 if (!gatherDM) PetscFunctionReturn(0); 1863 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1864 ierr = DMPlexGetChart(gatherDM,&pStart,&pEnd);CHKERRQ(ierr); 1865 numPoints = pEnd - pStart; 1866 ierr = MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);CHKERRQ(ierr); 1867 ierr = PetscMalloc1(numPoints,&points);CHKERRQ(ierr); 1868 ierr = PetscSFCreate(comm,&migrationSF);CHKERRQ(ierr); 1869 for (p = 0; p < numPoints; p++) { 1870 points[p].index = p; 1871 points[p].rank = 0; 1872 } 1873 ierr = PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);CHKERRQ(ierr); 1874 ierr = DMPlexCreate(comm, redundantMesh);CHKERRQ(ierr); 1875 ierr = PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");CHKERRQ(ierr); 1876 ierr = DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);CHKERRQ(ierr); 1877 ierr = DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1878 ierr = DMSetPointSF(*redundantMesh, sfPoint);CHKERRQ(ierr); 1879 ierr = DMGetCoordinateDM(*redundantMesh, &dmCoord);CHKERRQ(ierr); 1880 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1881 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1882 ierr = 
PetscSFDestroy(&migrationSF);CHKERRQ(ierr); 1883 ierr = DMDestroy(&gatherDM);CHKERRQ(ierr); 1884 PetscFunctionReturn(0); 1885 } 1886