1 #include <petsc-private/dmpleximpl.h> /*I "petscdmplex.h" I*/ 2 3 #undef __FUNCT__ 4 #define __FUNCT__ "DMPlexSetAdjacencyUseCone" 5 /*@ 6 DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first 7 8 Input Parameters: 9 + dm - The DM object 10 - useCone - Flag to use the cone first 11 12 Level: intermediate 13 14 Notes: 15 $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE 16 $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE 17 $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE 18 19 .seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator() 20 @*/ 21 PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone) 22 { 23 DM_Plex *mesh = (DM_Plex *) dm->data; 24 25 PetscFunctionBegin; 26 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 27 mesh->useCone = useCone; 28 PetscFunctionReturn(0); 29 } 30 31 #undef __FUNCT__ 32 #define __FUNCT__ "DMPlexGetAdjacencyUseCone" 33 /*@ 34 DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first 35 36 Input Parameter: 37 . dm - The DM object 38 39 Output Parameter: 40 . useCone - Flag to use the cone first 41 42 Level: intermediate 43 44 Notes: 45 $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE 46 $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE 47 $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE 48 49 .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator() 50 @*/ 51 PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone) 52 { 53 DM_Plex *mesh = (DM_Plex *) dm->data; 54 55 PetscFunctionBegin; 56 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 57 PetscValidIntPointer(useCone, 2); 58 *useCone = mesh->useCone; 59 PetscFunctionReturn(0); 60 } 61 62 #undef __FUNCT__ 63 #define __FUNCT__ "DMPlexSetAdjacencyUseClosure" 64 /*@ 65 DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure 66 67 Input Parameters: 68 + dm - The DM object 69 - useClosure - Flag to use the closure 70 71 Level: intermediate 72 73 Notes: 74 $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE 75 $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE 76 $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE 77 78 .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator() 79 @*/ 80 PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure) 81 { 82 DM_Plex *mesh = (DM_Plex *) dm->data; 83 84 PetscFunctionBegin; 85 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 86 mesh->useClosure = useClosure; 87 PetscFunctionReturn(0); 88 } 89 90 #undef __FUNCT__ 91 #define __FUNCT__ "DMPlexGetAdjacencyUseClosure" 92 /*@ 93 DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure 94 95 Input Parameter: 96 . 
dm - The DM object 97 98 Output Parameter: 99 . useClosure - Flag to use the closure 100 101 Level: intermediate 102 103 Notes: 104 $ FEM: Two points p and q are adjacent if q \in closure(star(p)), useCone = PETSC_FALSE, useClosure = PETSC_TRUE 105 $ FVM: Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE, useClosure = PETSC_FALSE 106 $ FVM++: Two points p and q are adjacent if q \in star(closure(p)), useCone = PETSC_TRUE, useClosure = PETSC_TRUE 107 108 .seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator() 109 @*/ 110 PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure) 111 { 112 DM_Plex *mesh = (DM_Plex *) dm->data; 113 114 PetscFunctionBegin; 115 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 116 PetscValidIntPointer(useClosure, 2); 117 *useClosure = mesh->useClosure; 118 PetscFunctionReturn(0); 119 } 120 121 #undef __FUNCT__ 122 #define __FUNCT__ "DMPlexSetAdjacencyUseAnchors" 123 /*@ 124 DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints. 125 126 Input Parameters: 127 + dm - The DM object 128 - useAnchors - Flag to use the constraints. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 129 130 Level: intermediate 131 132 .seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 133 @*/ 134 PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors) 135 { 136 DM_Plex *mesh = (DM_Plex *) dm->data; 137 138 PetscFunctionBegin; 139 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 140 mesh->useAnchors = useAnchors; 141 PetscFunctionReturn(0); 142 } 143 144 #undef __FUNCT__ 145 #define __FUNCT__ "DMPlexGetAdjacencyUseAnchors" 146 /*@ 147 DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints. 148 149 Input Parameter: 150 . dm - The DM object 151 152 Output Parameter: 153 . useAnchors - Flag to use the closure. If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place. 154 155 Level: intermediate 156 157 .seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors() 158 @*/ 159 PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors) 160 { 161 DM_Plex *mesh = (DM_Plex *) dm->data; 162 163 PetscFunctionBegin; 164 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 165 PetscValidIntPointer(useAnchors, 2); 166 *useAnchors = mesh->useAnchors; 167 PetscFunctionReturn(0); 168 } 169 170 #undef __FUNCT__ 171 #define __FUNCT__ "DMPlexGetAdjacency_Cone_Internal" 172 static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 173 { 174 const PetscInt *cone = NULL; 175 PetscInt numAdj = 0, maxAdjSize = *adjSize, coneSize, c; 176 PetscErrorCode ierr; 177 178 PetscFunctionBeginHot; 179 ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr); 180 ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr); 181 for (c = 0; c <= coneSize; ++c) { 182 const PetscInt point = !c ? 
p : cone[c-1]; 183 const PetscInt *support = NULL; 184 PetscInt supportSize, s, q; 185 186 ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr); 187 ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr); 188 for (s = 0; s < supportSize; ++s) { 189 for (q = 0; q < numAdj || (adj[numAdj++] = support[s],0); ++q) { 190 if (support[s] == adj[q]) break; 191 } 192 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 193 } 194 } 195 *adjSize = numAdj; 196 PetscFunctionReturn(0); 197 } 198 199 #undef __FUNCT__ 200 #define __FUNCT__ "DMPlexGetAdjacency_Support_Internal" 201 static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[]) 202 { 203 const PetscInt *support = NULL; 204 PetscInt numAdj = 0, maxAdjSize = *adjSize, supportSize, s; 205 PetscErrorCode ierr; 206 207 PetscFunctionBeginHot; 208 ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr); 209 ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr); 210 for (s = 0; s <= supportSize; ++s) { 211 const PetscInt point = !s ? p : support[s-1]; 212 const PetscInt *cone = NULL; 213 PetscInt coneSize, c, q; 214 215 ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr); 216 ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr); 217 for (c = 0; c < coneSize; ++c) { 218 for (q = 0; q < numAdj || (adj[numAdj++] = cone[c],0); ++q) { 219 if (cone[c] == adj[q]) break; 220 } 221 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 222 } 223 } 224 *adjSize = numAdj; 225 PetscFunctionReturn(0); 226 } 227 228 #undef __FUNCT__ 229 #define __FUNCT__ "DMPlexGetAdjacency_Transitive_Internal" 230 static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[]) 231 { 232 PetscInt *star = NULL; 233 PetscInt numAdj = 0, maxAdjSize = *adjSize, starSize, s; 234 PetscErrorCode ierr; 235 236 PetscFunctionBeginHot; 237 ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 238 for (s = 0; s < starSize*2; s += 2) { 239 const PetscInt *closure = NULL; 240 PetscInt closureSize, c, q; 241 242 ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 243 for (c = 0; c < closureSize*2; c += 2) { 244 for (q = 0; q < numAdj || (adj[numAdj++] = closure[c],0); ++q) { 245 if (closure[c] == adj[q]) break; 246 } 247 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 248 } 249 ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr); 250 } 251 ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr); 252 *adjSize = numAdj; 253 PetscFunctionReturn(0); 254 } 255 256 #undef __FUNCT__ 257 #define __FUNCT__ "DMPlexGetAdjacency_Internal" 258 PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[]) 259 { 260 static PetscInt asiz = 0; 261 PetscInt maxAnchors = 1; 262 PetscInt aStart = -1, aEnd = -1; 263 PetscInt maxAdjSize; 264 PetscSection aSec = NULL; 265 IS aIS = NULL; 266 const PetscInt *anchors; 267 PetscErrorCode ierr; 268 269 PetscFunctionBeginHot; 270 if (useAnchors) { 271 ierr = 
DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr); 272 if (aSec) { 273 ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr); 274 maxAnchors = PetscMax(1,maxAnchors); 275 ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr); 276 ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr); 277 } 278 } 279 if (!*adj) { 280 PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd; 281 282 ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr); 283 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 284 ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr); 285 coneSeries = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1; 286 supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1; 287 asiz = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries); 288 asiz *= maxAnchors; 289 asiz = PetscMin(asiz,pEnd-pStart); 290 ierr = PetscMalloc1(asiz,adj);CHKERRQ(ierr); 291 } 292 if (*adjSize < 0) *adjSize = asiz; 293 maxAdjSize = *adjSize; 294 if (useTransitiveClosure) { 295 ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr); 296 } else if (useCone) { 297 ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 298 } else { 299 ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr); 300 } 301 if (useAnchors && aSec) { 302 PetscInt origSize = *adjSize; 303 PetscInt numAdj = origSize; 304 PetscInt i = 0, j; 305 PetscInt *orig = *adj; 306 307 while (i < origSize) { 308 PetscInt p = orig[i]; 309 PetscInt aDof = 0; 310 311 if (p >= aStart && p < aEnd) { 312 ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr); 313 } 314 if (aDof) { 315 PetscInt aOff; 316 PetscInt s, q; 317 318 for (j = i + 1; j < numAdj; j++) { 319 orig[j - 1] = orig[j]; 320 } 321 origSize--; 322 numAdj--; 323 ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr); 324 for (s = 0; s < aDof; ++s) { 325 for (q = 0; q < numAdj || (orig[numAdj++] = anchors[aOff+s],0); ++q) { 326 if (anchors[aOff+s] == orig[q]) break; 327 } 328 if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize); 329 } 330 } 331 else { 332 i++; 333 } 334 } 335 *adjSize = numAdj; 336 ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr); 337 } 338 PetscFunctionReturn(0); 339 } 340 341 #undef __FUNCT__ 342 #define __FUNCT__ "DMPlexGetAdjacency" 343 /*@ 344 DMPlexGetAdjacency - Return all points adjacent to the given point 345 346 Input Parameters: 347 + dm - The DM object 348 . p - The point 349 . adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE 350 - adj - Either NULL so that the array is allocated, or an existing array with size adjSize 351 352 Output Parameters: 353 + adjSize - The number of adjacent points 354 - adj - The adjacent points 355 356 Level: advanced 357 358 Notes: The user must PetscFree the adj array if it was not passed in. 
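
  Example: a minimal usage sketch (the DM dm and point p are assumed to already exist; error handling abbreviated):
$  PetscInt  adjSize = PETSC_DETERMINE;
$  PetscInt *adj     = NULL;
$  ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
$  /* adj[0..adjSize-1] now holds the points adjacent to p */
$  ierr = PetscFree(adj);CHKERRQ(ierr);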
359 360 .seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator() 361 @*/ 362 PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[]) 363 { 364 DM_Plex *mesh = (DM_Plex *) dm->data; 365 PetscErrorCode ierr; 366 367 PetscFunctionBeginHot; 368 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 369 PetscValidPointer(adjSize,3); 370 PetscValidPointer(adj,4); 371 ierr = DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);CHKERRQ(ierr); 372 PetscFunctionReturn(0); 373 } 374 #undef __FUNCT__ 375 #define __FUNCT__ "DMPlexCreateTwoSidedProcessSF" 376 /*@ 377 DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity 378 379 Collective on DM 380 381 Input Parameters: 382 + dm - The DM 383 - sfPoint - The PetscSF which encodes point connectivity 384 385 Output Parameters: 386 + processRanks - A list of process neighbors, or NULL 387 - sfProcess - An SF encoding the two-sided process connectivity, or NULL 388 389 Level: developer 390 391 .seealso: PetscSFCreate(), DMPlexCreateProcessSF() 392 @*/ 393 PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess) 394 { 395 const PetscSFNode *remotePoints; 396 PetscInt *localPointsNew; 397 PetscSFNode *remotePointsNew; 398 const PetscInt *nranks; 399 PetscInt *ranksNew; 400 PetscBT neighbors; 401 PetscInt pStart, pEnd, p, numLeaves, l, numNeighbors, n; 402 PetscMPIInt numProcs, proc, rank; 403 PetscErrorCode ierr; 404 405 PetscFunctionBegin; 406 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 407 PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2); 408 if (processRanks) {PetscValidPointer(processRanks, 3);} 409 if (sfProcess) {PetscValidPointer(sfProcess, 4);} 410 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);CHKERRQ(ierr); 411 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 412 ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr); 413 ierr = PetscBTCreate(numProcs, &neighbors);CHKERRQ(ierr); 414 ierr = PetscBTMemzero(numProcs, neighbors);CHKERRQ(ierr); 415 /* Compute root-to-leaf process connectivity */ 416 ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr); 417 ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr); 418 for (p = pStart; p < pEnd; ++p) { 419 PetscInt ndof, noff, n; 420 421 ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr); 422 ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr); 423 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);} 424 } 425 ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr); 426 /* Compute leaf-to-neighbor process connectivity */ 427 ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr); 428 ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr); 429 for (p = pStart; p < pEnd; ++p) { 430 PetscInt ndof, noff, n; 431 432 ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr); 433 ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr); 434 for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);} 435 } 436 ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr); 437 /* Compute leaf-to-root process connectivity */ 438 for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, 
remotePoints[l].rank);} 439 /* Calculate edges */ 440 PetscBTClear(neighbors, rank); 441 for(proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;} 442 ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr); 443 ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr); 444 ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr); 445 for(proc = 0, n = 0; proc < numProcs; ++proc) { 446 if (PetscBTLookup(neighbors, proc)) { 447 ranksNew[n] = proc; 448 localPointsNew[n] = proc; 449 remotePointsNew[n].index = rank; 450 remotePointsNew[n].rank = proc; 451 ++n; 452 } 453 } 454 ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr); 455 if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);} 456 else {ierr = PetscFree(ranksNew);CHKERRQ(ierr);} 457 if (sfProcess) { 458 ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr); 459 ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr); 460 ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr); 461 ierr = PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr); 462 } 463 PetscFunctionReturn(0); 464 } 465 466 #undef __FUNCT__ 467 #define __FUNCT__ "DMPlexDistributeOwnership" 468 /*@ 469 DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF. 470 471 Collective on DM 472 473 Input Parameter: 474 . dm - The DM 475 476 Output Parameters: 477 + rootSection - The number of leaves for a given root point 478 . rootrank - The rank of each edge into the root point 479 . 
leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Level: developer

.seealso: DMPlexCreateOverlap()
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  /* Compute number of leaves for each root */
  ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr);
  ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);}
  ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr);
  /* Gather rank of each leaf to root */
  ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr);
  ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr);
  ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr);
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscFree(myrank);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr);
  /* Distribute remote ranks to leaves */
  ierr = PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr);
  ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlap"
/*@C
  DMPlexCreateOverlap - Compute the overlap of the mesh partition: a DMLabel marking, as point/rank pairs, the points that must be sent to each neighbouring process to build the requested number of overlap levels

  Collective on DM

  Input Parameters:
+ dm - The DM
. levels - Number of overlap levels
. rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
.
leafSection - The number of processes sharing a given leaf point 536 - leafrank - The rank of each process sharing a leaf point 537 538 Output Parameters: 539 + ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 540 541 Level: developer 542 543 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute() 544 @*/ 545 PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 546 { 547 MPI_Comm comm; 548 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 549 PetscSF sfPoint, sfProc; 550 const PetscSFNode *remote; 551 const PetscInt *local; 552 const PetscInt *nrank, *rrank; 553 PetscInt *adj = NULL; 554 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l; 555 PetscMPIInt rank, numProcs; 556 PetscBool useCone, useClosure, flg; 557 PetscErrorCode ierr; 558 559 PetscFunctionBegin; 560 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 561 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 562 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 563 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 564 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 565 ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr); 566 ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr); 567 ierr = DMLabelCreate("Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr); 568 /* Handle leaves: shared with the root point */ 569 for (l = 0; l < nleaves; ++l) { 570 PetscInt adjSize = PETSC_DETERMINE, a; 571 572 ierr = DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);CHKERRQ(ierr); 573 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);} 574 } 575 ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr); 576 ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr); 577 /* Handle roots */ 578 for (p = pStart; p < pEnd; ++p) { 579 PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a; 580 581 if ((p >= sStart) && (p < sEnd)) { 582 /* Some leaves share a root with other leaves on different processes */ 583 ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr); 584 if (neighbors) { 585 ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr); 586 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 587 for (n = 0; n < neighbors; ++n) { 588 const PetscInt remoteRank = nrank[noff+n]; 589 590 if (remoteRank == rank) continue; 591 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 592 } 593 } 594 } 595 /* Roots are shared with leaves */ 596 ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr); 597 if (!neighbors) continue; 598 ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr); 599 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 600 for (n = 0; n < neighbors; ++n) { 601 const PetscInt remoteRank = rrank[noff+n]; 602 603 if (remoteRank == rank) continue; 604 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 605 } 606 } 607 ierr = PetscFree(adj);CHKERRQ(ierr); 608 ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr); 609 ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr); 610 /* Add additional overlap levels */ 611 for (l = 1; l < levels; l++) {ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr);} 612 /* We require the closure in the 
overlap */
  ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
  if (useCone || !useClosure) {
    ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr);
  }
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);CHKERRQ(ierr);
    ierr = DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }
  /* Make process SF and invert sender to receiver label */
  ierr = DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, NULL, &sfProc);CHKERRQ(ierr);
  ierr = DMLabelCreate("Overlap label", ovLabel);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);CHKERRQ(ierr);
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);}
  for (l = 0; l < nleaves; ++l) {
    ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr);
    ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr);
  }
  /* Clean up */
  ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfProc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlapMigrationSF"
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);

  /* Count received points in each stratum and compute the internal strata shift */
  ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
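    /* depthIdx[d] is the insertion cursor for stratum d in the new numbering: it starts at the shifted
       stratum start and is advanced past the local points below, so that incoming overlap points of
       depth d are appended directly after them */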
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Build an SF that describes the full overlap migration */
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  newLeaves = pEnd - pStart + nleaves;
  ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr);
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr);

  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexStratifyMigrationSF"
/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF so that the resulting DM is stratified, i.e. points are numbered contiguously by depth

  Input Parameters:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Level: developer

.seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  const PetscSFNode *iremote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr);
  ierr = MPI_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
  if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d = 0; d < depth+1; ++d) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) pointDepths[p] = d;
  }
  for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  /* Count received points in each stratum and compute the internal strata shift */
  ierr = PetscMalloc3(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx);CHKERRQ(ierr);
  for (d = 0; d < depth+1; ++d) depthRecv[d] = 0;
  for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
  depthShift[depth] = 0;
  for (d = 0; d < depth; ++d) depthShift[d] = depthRecv[depth];
  for (d = 1; d < depth; ++d) depthShift[d] += depthRecv[0];
  for (d = depth-2; d > 0; --d) depthShift[d] += depthRecv[d+1];
  for (d = 0; d < depth+1; ++d) {depthIdx[d] = 0;}
  /* Derive a new local permutation based on stratified indices */
  ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr);
  for (p = 0; p < nleaves; ++p) {
    const PetscInt dep = remoteDepths[p];

    ilocal[p] = depthShift[dep] + depthIdx[dep];
    depthIdx[dep]++;
  }
  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr);
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);
  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeField"
/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
.
originalSection - The PetscSection for existing data layout
- originalVec - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newVec - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscScalar   *originalValues, *newValues;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr);

  ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeFieldIS"
/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalIS - The existing data

  Output Parameters:
+ newSection - The PetscSection describing the new data layout
- newIS - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
{
  PetscSF         fieldSF;
  PetscInt       *newValues, *remoteOffsets, fieldSize;
  const PetscInt *originalValues;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr);

  ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeData"
/*@
  DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
.
datatype - The type of data 912 - originalData - The existing data 913 914 Output Parameters: 915 + newSection - The PetscSection describing the new data layout 916 - newData - The new data 917 918 Level: developer 919 920 .seealso: DMPlexDistribute(), DMPlexDistributeField() 921 @*/ 922 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData) 923 { 924 PetscSF fieldSF; 925 PetscInt *remoteOffsets, fieldSize; 926 PetscMPIInt dataSize; 927 PetscErrorCode ierr; 928 929 PetscFunctionBegin; 930 ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 931 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 932 933 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 934 ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr); 935 ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr); 936 937 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 938 ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 939 ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 940 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 941 ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 942 PetscFunctionReturn(0); 943 } 944 945 #undef __FUNCT__ 946 #define __FUNCT__ "DMPlexDistributeCones" 947 PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 948 { 949 DM_Plex *mesh = (DM_Plex*) dm->data; 950 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 951 MPI_Comm comm; 952 PetscSF coneSF; 953 PetscSection originalConeSection, newConeSection; 954 PetscInt *remoteOffsets, *cones, *newCones, newConesSize; 955 PetscBool flg; 956 PetscErrorCode ierr; 957 958 PetscFunctionBegin; 959 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 960 PetscValidPointer(dmParallel,4); 961 ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr); 962 963 /* Distribute cone section */ 964 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 965 ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr); 966 ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr); 967 ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr); 968 ierr = DMSetUp(dmParallel);CHKERRQ(ierr); 969 { 970 PetscInt pStart, pEnd, p; 971 972 ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr); 973 for (p = pStart; p < pEnd; ++p) { 974 PetscInt coneSize; 975 ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr); 976 pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize); 977 } 978 } 979 /* Communicate and renumber cones */ 980 ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr); 981 ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr); 982 ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr); 983 ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 984 ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 985 ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr); 986 ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr); 987 #if PETSC_USE_DEBUG 
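  /* In debug builds, verify that the global-to-local renumbering resolved every cone point,
     i.e. every point referenced by a migrated cone was itself migrated to this process */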
988 { 989 PetscInt p; 990 PetscBool valid = PETSC_TRUE; 991 for (p = 0; p < newConesSize; ++p) { 992 if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);} 993 } 994 if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map"); 995 } 996 #endif 997 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr); 998 if (flg) { 999 ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr); 1000 ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1001 ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr); 1002 ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1003 ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr); 1004 } 1005 ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr); 1006 ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr); 1007 ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 1008 ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 1009 ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr); 1010 ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr); 1011 /* Create supports and stratify sieve */ 1012 { 1013 PetscInt pStart, pEnd; 1014 1015 ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr); 1016 ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr); 1017 } 1018 ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr); 1019 ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr); 1020 pmesh->useCone = mesh->useCone; 1021 pmesh->useClosure = mesh->useClosure; 1022 PetscFunctionReturn(0); 1023 } 1024 1025 #undef __FUNCT__ 1026 #define __FUNCT__ "DMPlexDistributeCoordinates" 1027 PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel) 1028 { 1029 MPI_Comm comm; 1030 PetscSection originalCoordSection, newCoordSection; 1031 Vec originalCoordinates, newCoordinates; 1032 PetscInt bs; 1033 const char *name; 1034 const PetscReal *maxCell, *L; 1035 const DMBoundaryType *bd; 1036 PetscErrorCode ierr; 1037 1038 PetscFunctionBegin; 1039 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1040 PetscValidPointer(dmParallel, 3); 1041 1042 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1043 ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr); 1044 ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr); 1045 ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr); 1046 if (originalCoordinates) { 1047 ierr = VecCreate(comm, &newCoordinates);CHKERRQ(ierr); 1048 ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr); 1049 ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr); 1050 1051 ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr); 1052 ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr); 1053 ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr); 1054 ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr); 1055 ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr); 1056 } 1057 ierr = DMGetPeriodicity(dm, &maxCell, &L, &bd);CHKERRQ(ierr); 1058 if (L) {ierr = DMSetPeriodicity(dmParallel, maxCell, L, bd);CHKERRQ(ierr);} 1059 PetscFunctionReturn(0); 1060 } 1061 1062 #undef __FUNCT__ 1063 #define __FUNCT__ 
"DMPlexDistributeLabels" 1064 /* Here we are assuming that process 0 always has everything */ 1065 PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel) 1066 { 1067 MPI_Comm comm; 1068 PetscMPIInt rank; 1069 PetscInt numLabels, numLocalLabels, l; 1070 PetscBool hasLabels = PETSC_FALSE; 1071 PetscErrorCode ierr; 1072 1073 PetscFunctionBegin; 1074 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1075 PetscValidHeaderSpecific(dm, DM_CLASSID, 3); 1076 ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr); 1077 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1078 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1079 1080 /* Everyone must have either the same number of labels, or none */ 1081 ierr = DMPlexGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr); 1082 numLabels = numLocalLabels; 1083 ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr); 1084 if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE; 1085 for (l = numLabels-1; l >= 0; --l) { 1086 DMLabel label = NULL, labelNew = NULL; 1087 PetscBool isdepth; 1088 1089 if (hasLabels) { 1090 ierr = DMPlexGetLabelByNum(dm, l, &label);CHKERRQ(ierr); 1091 /* Skip "depth" because it is recreated */ 1092 ierr = PetscStrcmp(label->name, "depth", &isdepth);CHKERRQ(ierr); 1093 } 1094 ierr = MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr); 1095 if (isdepth) continue; 1096 ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr); 1097 ierr = DMPlexAddLabel(dmParallel, labelNew);CHKERRQ(ierr); 1098 } 1099 ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr); 1100 PetscFunctionReturn(0); 1101 } 1102 1103 #undef __FUNCT__ 1104 #define __FUNCT__ "DMPlexDistributeSetupHybrid" 1105 PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 1106 { 1107 DM_Plex *mesh = (DM_Plex*) dm->data; 1108 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1109 MPI_Comm comm; 1110 const PetscInt *gpoints; 1111 PetscInt dim, depth, n, d; 1112 PetscErrorCode ierr; 1113 1114 PetscFunctionBegin; 1115 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1116 PetscValidPointer(dmParallel, 4); 1117 1118 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1119 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1120 1121 /* Setup hybrid structure */ 1122 for (d = 0; d <= dim; ++d) {pmesh->hybridPointMax[d] = mesh->hybridPointMax[d];} 1123 ierr = MPI_Bcast(pmesh->hybridPointMax, dim+1, MPIU_INT, 0, comm);CHKERRQ(ierr); 1124 ierr = ISLocalToGlobalMappingGetSize(renumbering, &n);CHKERRQ(ierr); 1125 ierr = ISLocalToGlobalMappingGetIndices(renumbering, &gpoints);CHKERRQ(ierr); 1126 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 1127 for (d = 0; d <= dim; ++d) { 1128 PetscInt pmax = pmesh->hybridPointMax[d], newmax = 0, pEnd, stratum[2], p; 1129 1130 if (pmax < 0) continue; 1131 ierr = DMPlexGetDepthStratum(dm, d > depth ? 
depth : d, &stratum[0], &stratum[1]);CHKERRQ(ierr); 1132 ierr = DMPlexGetDepthStratum(dmParallel, d, NULL, &pEnd);CHKERRQ(ierr); 1133 ierr = MPI_Bcast(stratum, 2, MPIU_INT, 0, comm);CHKERRQ(ierr); 1134 for (p = 0; p < n; ++p) { 1135 const PetscInt point = gpoints[p]; 1136 1137 if ((point >= stratum[0]) && (point < stratum[1]) && (point >= pmax)) ++newmax; 1138 } 1139 if (newmax > 0) pmesh->hybridPointMax[d] = pEnd - newmax; 1140 else pmesh->hybridPointMax[d] = -1; 1141 } 1142 ierr = ISLocalToGlobalMappingRestoreIndices(renumbering, &gpoints);CHKERRQ(ierr); 1143 PetscFunctionReturn(0); 1144 } 1145 1146 #undef __FUNCT__ 1147 #define __FUNCT__ "DMPlexDistributeSetupTree" 1148 PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 1149 { 1150 DM_Plex *mesh = (DM_Plex*) dm->data; 1151 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1152 MPI_Comm comm; 1153 DM refTree; 1154 PetscSection origParentSection, newParentSection; 1155 PetscInt *origParents, *origChildIDs; 1156 PetscBool flg; 1157 PetscErrorCode ierr; 1158 1159 PetscFunctionBegin; 1160 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1161 PetscValidHeaderSpecific(dm, DM_CLASSID, 4); 1162 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1163 1164 /* Set up tree */ 1165 ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr); 1166 ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr); 1167 ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr); 1168 if (origParentSection) { 1169 PetscInt pStart, pEnd; 1170 PetscInt *newParents, *newChildIDs; 1171 PetscInt *remoteOffsetsParents, newParentSize; 1172 PetscSF parentSF; 1173 1174 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 1175 ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr); 1176 ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr); 1177 ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr); 1178 ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr); 1179 ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr); 1180 ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr); 1181 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origParents, newParents);CHKERRQ(ierr); 1182 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origParents, newParents);CHKERRQ(ierr); 1183 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1184 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1185 ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr); 1186 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr); 1187 if (flg) { 1188 ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr); 1189 ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1190 ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr); 1191 ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1192 ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr); 1193 } 1194 ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr); 1195 ierr = 
PetscSectionDestroy(&newParentSection);CHKERRQ(ierr); 1196 ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr); 1197 ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr); 1198 } 1199 pmesh->useAnchors = mesh->useAnchors; 1200 PetscFunctionReturn(0); 1201 } 1202 1203 #undef __FUNCT__ 1204 #define __FUNCT__ "DMPlexDistributeSF" 1205 PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel) 1206 { 1207 DM_Plex *mesh = (DM_Plex*) dm->data; 1208 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1209 PetscMPIInt rank, numProcs; 1210 MPI_Comm comm; 1211 PetscErrorCode ierr; 1212 1213 PetscFunctionBegin; 1214 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1215 PetscValidPointer(dmParallel,7); 1216 1217 /* Create point SF for parallel mesh */ 1218 ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1219 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1220 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1221 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 1222 { 1223 const PetscInt *leaves; 1224 PetscSFNode *remotePoints, *rowners, *lowners; 1225 PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints; 1226 PetscInt pStart, pEnd; 1227 1228 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 1229 ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr); 1230 ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr); 1231 for (p=0; p<numRoots; p++) { 1232 rowners[p].rank = -1; 1233 rowners[p].index = -1; 1234 } 1235 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1236 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1237 for (p = 0; p < numLeaves; ++p) { 1238 if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */ 1239 lowners[p].rank = rank; 1240 lowners[p].index = leaves ? leaves[p] : p; 1241 } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */ 1242 lowners[p].rank = -2; 1243 lowners[p].index = -2; 1244 } 1245 } 1246 for (p=0; p<numRoots; p++) { /* Root must not participate in the rediction, flag so that MAXLOC does not use */ 1247 rowners[p].rank = -3; 1248 rowners[p].index = -3; 1249 } 1250 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1251 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1252 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1253 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1254 for (p = 0; p < numLeaves; ++p) { 1255 if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed"); 1256 if (lowners[p].rank != rank) ++numGhostPoints; 1257 } 1258 ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr); 1259 ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr); 1260 for (p = 0, gp = 0; p < numLeaves; ++p) { 1261 if (lowners[p].rank != rank) { 1262 ghostPoints[gp] = leaves ? 
leaves[p] : p; 1263 remotePoints[gp].rank = lowners[p].rank; 1264 remotePoints[gp].index = lowners[p].index; 1265 ++gp; 1266 } 1267 } 1268 ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr); 1269 ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr); 1270 ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr); 1271 } 1272 pmesh->useCone = mesh->useCone; 1273 pmesh->useClosure = mesh->useClosure; 1274 ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1275 PetscFunctionReturn(0); 1276 } 1277 1278 #undef __FUNCT__ 1279 #define __FUNCT__ "DMPlexCreatePointSF" 1280 /*@C 1281 DMPlexDerivePointSF - Build a point SF from an SF describing a point migration 1282 1283 Input Parameter: 1284 + dm - The source DMPlex object 1285 . migrationSF - The star forest that describes the parallel point remapping 1286 . ownership - Flag causing a vote to determine point ownership 1287 1288 Output Parameter: 1289 - pointSF - The star forest describing the point overlap in the remapped DM 1290 1291 Level: developer 1292 1293 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1294 @*/ 1295 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF) 1296 { 1297 PetscMPIInt rank; 1298 PetscInt p, nroots, nleaves, idx, npointLeaves; 1299 PetscInt *pointLocal; 1300 const PetscInt *leaves; 1301 const PetscSFNode *roots; 1302 PetscSFNode *rootNodes, *leafNodes, *pointRemote; 1303 PetscErrorCode ierr; 1304 1305 PetscFunctionBegin; 1306 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1307 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 1308 1309 ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr); 1310 ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr); 1311 if (ownership) { 1312 /* Point ownership vote: Process with highest rank ownes shared points */ 1313 for (p = 0; p < nleaves; ++p) { 1314 /* Either put in a bid or we know we own it */ 1315 leafNodes[p].rank = rank; 1316 leafNodes[p].index = p; 1317 } 1318 for (p = 0; p < nroots; p++) { 1319 /* Root must not participate in the reduction, flag so that MAXLOC does not use */ 1320 rootNodes[p].rank = -3; 1321 rootNodes[p].index = -3; 1322 } 1323 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1324 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1325 } else { 1326 for (p = 0; p < nroots; p++) { 1327 rootNodes[p].index = -1; 1328 rootNodes[p].rank = rank; 1329 }; 1330 for (p = 0; p < nleaves; p++) { 1331 /* Write new local id into old location */ 1332 if (roots[p].rank == rank) { 1333 rootNodes[roots[p].index].index = leaves[p]; 1334 } 1335 } 1336 } 1337 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1338 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1339 1340 for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;} 1341 ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr); 1342 ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr); 1343 for (idx = 0, p = 0; p < nleaves; p++) { 1344 if (leafNodes[p].rank != rank) { 1345 pointLocal[idx] = p; 1346 pointRemote[idx] = leafNodes[p]; 1347 idx++; 1348 } 1349 } 1350 ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr); 1351 ierr = 
  ierr = PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
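
/* Usage sketch (illustrative, not part of the library API): within a
   distribution pipeline the point SF of a freshly migrated DM is rebuilt from
   the same migration SF and attached with DMSetPointSF(), mirroring the calls
   made by DMPlexDistribute() below. The variable names are placeholders.

     PetscSF pointSF;
     ierr = DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &pointSF);CHKERRQ(ierr);
     ierr = DMSetPointSF(dmParallel, pointSF);CHKERRQ(ierr);
     ierr = PetscSFDestroy(&pointSF);CHKERRQ(ierr);
*/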

#undef __FUNCT__
#define __FUNCT__ "DMPlexMigrate"
/*@C
  DMPlexMigrate - Migrates internal DM data over the supplied star forest

  Input Parameters:
+ dm - The source DMPlex object
- sf - The star forest communication context describing the migration pattern

  Output Parameter:
. targetDM - The target DMPlex object

  Level: intermediate

.seealso: DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
{
  MPI_Comm               comm;
  PetscInt               dim, nroots;
  PetscSF                sfPoint;
  ISLocalToGlobalMapping ltogMigration;
  PetscBool              flg;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr);

  /* Check for a one-to-all distribution pattern */
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr);
  if (nroots >= 0) {
    IS                     isOriginal;
    ISLocalToGlobalMapping ltogOriginal;
    PetscInt               n, size, nleaves, conesSize;
    PetscInt              *numbering_orig, *numbering_new, *cones;
    PetscSection           coneSection;
    /* Get the original point numbering */
    ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr);
    /* Convert to positive global numbers */
    for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);}
    /* Derive the new local-to-global mapping from the old one */
    ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, &ltogMigration);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr);
    /* Convert cones to global numbering before migrating them */
    ierr = DMPlexGetConeSection(dm, &coneSection);CHKERRQ(ierr);
    ierr = PetscSectionGetStorageSize(coneSection, &conesSize);CHKERRQ(ierr);
    ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApplyBlock(ltogOriginal, conesSize, cones, cones);CHKERRQ(ierr);
    ierr = ISDestroy(&isOriginal);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingDestroy(&ltogOriginal);CHKERRQ(ierr);
  } else {
    /* One-to-all distribution pattern: We can derive LToG from SF */
    ierr = ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);CHKERRQ(ierr);
  }
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr);
  }
  /* Migrate DM data to target DM */
  ierr = DMPlexDistributeCones(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = DMPlexDistributeSetupTree(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(&ltogMigration);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
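
/* Usage sketch (illustrative): DMPlexMigrate() moves the cones, coordinates,
   labels and trees of a source DM into a pre-created target DM following a
   migration SF, as done by DMPlexDistribute() and DMPlexDistributeOverlap()
   below. The names dmTarget and sfMigration are placeholders.

     DM dmTarget;
     ierr = DMPlexCreate(comm, &dmTarget);CHKERRQ(ierr);
     ierr = DMPlexMigrate(dm, sfMigration, dmTarget);CHKERRQ(ierr);
*/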

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistribute"
/*@C
  DMPlexDistribute - Distributes the mesh and any associated sections.

  Not Collective

  Input Parameters:
+ dm - The original DMPlex object
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmParallel - The distributed DMPlex object, or NULL

  Note: If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
  DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
@*/
PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
{
  MPI_Comm         comm;
  PetscPartitioner partitioner;
  IS               cellPart;
  PetscSection     cellPartSection;
  DM               dmCoord;
  DMLabel          lblPartition, lblMigration;
  PetscSF          sfProcess, sfMigration, sfStratified, sfPoint;
  PetscBool        flg;
  PetscMPIInt      rank, numProcs, p;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (sf) PetscValidPointer(sf,4);
  PetscValidPointer(dmParallel,5);

  ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);

  *dmParallel = NULL;
  if (numProcs == 1) PetscFunctionReturn(0);

  /* Create cell partition */
  ierr = PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr);
  ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr);
  ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr);
  {
    /* Convert partition to DMLabel */
    PetscInt        proc, pStart, pEnd, npoints, poffset;
    const PetscInt *points;
    ierr = DMLabelCreate("Point Partition", &lblPartition);CHKERRQ(ierr);
    ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr);
    ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (proc = pStart; proc < pEnd; proc++) {
      ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr);
      ierr = PetscSectionGetOffset(cellPartSection, proc, &poffset);CHKERRQ(ierr);
      for (p = poffset; p < poffset+npoints; p++) {
        ierr = DMLabelSetValue(lblPartition, points[p], proc);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr);
  }
  ierr = DMPlexPartitionLabelClosure(dm, lblPartition);CHKERRQ(ierr);
  {
    /* Build a global process SF */
    PetscSFNode *remoteProc;
    ierr = PetscMalloc1(numProcs, &remoteProc);CHKERRQ(ierr);
    for (p = 0; p < numProcs; ++p) {
      remoteProc[p].rank  = p;
      remoteProc[p].index = rank;
    }
    ierr = PetscSFCreate(comm, &sfProcess);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) sfProcess, "Process SF");CHKERRQ(ierr);
    ierr = PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr);
  }
  ierr = DMLabelCreate("Point migration", &lblMigration);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr);
  /* Stratify the SF in case we are migrating an already parallel plex */
  ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);
  sfMigration = sfStratified;
  ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);CHKERRQ(ierr);
    ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }

  /* Create non-overlapping parallel DM and migrate internal data */
  ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr);

  /* Build the point SF without overlap */
  ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);}

  if (overlap > 0) {
    DM                 dmOverlap;
    PetscInt           nroots, nleaves;
    PetscSFNode       *newRemote;
    const PetscSFNode *oldRemote;
    PetscSF            sfOverlap, sfOverlapPoint;
    /* Add the partition overlap to the distributed DM */
    ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr);
    ierr = DMDestroy(dmParallel);CHKERRQ(ierr);
    *dmParallel = dmOverlap;
    if (flg) {
      ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr);
      ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr);
    }

    /* Re-map the migration SF to establish the full migration pattern */
    ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr);
    ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr);
    ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr);
    ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr);
    ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);
    sfMigration = sfOverlapPoint;
  }
  /* Cleanup Partition */
  ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr);
  ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr);
  ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr);
  ierr = ISDestroy(&cellPart);CHKERRQ(ierr);
  /* Copy BC */
  ierr = DMPlexCopyBoundary(dm, *dmParallel);CHKERRQ(ierr);
  /* Cleanup */
  if (sf) {*sf = sfMigration;}
  else    {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
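
/* Usage sketch (illustrative): a typical application-level call sequence,
   assuming dm is a serial DMPlex. Adjacency is configured first (here the
   FEM-style setting), the mesh is distributed with no overlap, and the
   original DM is replaced by the parallel one when distribution occurred.

     PetscSF distSF = NULL;
     DM      dmDist = NULL;
     ierr = DMPlexSetAdjacencyUseCone(dm, PETSC_FALSE);CHKERRQ(ierr);
     ierr = DMPlexSetAdjacencyUseClosure(dm, PETSC_TRUE);CHKERRQ(ierr);
     ierr = DMPlexDistribute(dm, 0, &distSF, &dmDist);CHKERRQ(ierr);
     if (dmDist) {
       ierr = DMDestroy(&dm);CHKERRQ(ierr);
       dm   = dmDist;
     }
     if (distSF) {ierr = PetscSFDestroy(&distSF);CHKERRQ(ierr);}
*/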

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeOverlap"
/*@C
  DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

  Not Collective

  Input Parameters:
+ dm - The non-overlapping distributed DMPlex object
- overlap - The overlap of partitions, 0 is the default

  Output Parameters:
+ sf - The PetscSF used for point distribution
- dmOverlap - The overlapping distributed DMPlex object, or NULL

  Note: If the mesh was not distributed, the return value is NULL.

  The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and
  DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function
  representation on the mesh.

  Level: intermediate

.keywords: mesh, elements
.seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure()
@*/
PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
{
  MPI_Comm       comm;
  PetscMPIInt    rank;
  PetscSection   rootSection, leafSection;
  IS             rootrank, leafrank;
  DM             dmCoord;
  DMLabel        lblOverlap;
  PetscSF        sfOverlap, sfStratified, sfPoint;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  if (sf) PetscValidPointer(sf, 3);
  PetscValidPointer(dmOverlap, 4);

  ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);

  /* Compute point overlap with neighbouring processes on the distributed DM */
  ierr = PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr);
  ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr);
  ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr);
  ierr = DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr);
  /* Convert overlap label to stratified migration SF */
  ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr);
  ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);
  sfOverlap = sfStratified;
  ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr);

  ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr);
  ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr);
  ierr = ISDestroy(&rootrank);CHKERRQ(ierr);
  ierr = ISDestroy(&leafrank);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr);

  /* Build the overlapping DM */
  ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr);
  ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr);
  /* Build the new point SF */
  ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr);
  ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr);
  ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr);
  if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);}
  ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr);
  /* Cleanup overlap partition */
  ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr);
  if (sf) *sf = sfOverlap;
  else    {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);}
  ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
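
/* Usage sketch (illustrative): adding one level of overlap to an existing
   distributed, non-overlapping DMPlex and replacing it by the overlapping
   mesh. The names dmDist, dmOvl and ovlSF are placeholders.

     PetscSF ovlSF = NULL;
     DM      dmOvl = NULL;
     ierr = DMPlexDistributeOverlap(dmDist, 1, &ovlSF, &dmOvl);CHKERRQ(ierr);
     if (dmOvl) {
       ierr = DMDestroy(&dmDist);CHKERRQ(ierr);
       dmDist = dmOvl;
     }
     if (ovlSF) {ierr = PetscSFDestroy(&ovlSF);CHKERRQ(ierr);}
*/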