#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseCone"
/*@
  DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

  Input Parameters:
+ dm      - The DM object
- useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Flag is consumed by DMPlexGetAdjacency() via DMPlexGetAdjacency_Internal() */
  mesh->useCone = useCone;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseCone"
/*@
  DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useCone, 2);
  *useCone = mesh->useCone;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseClosure"
/*@
  DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

  Input Parameters:
+ dm         - The DM object
- useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Flag is consumed by DMPlexGetAdjacency() via DMPlexGetAdjacency_Internal() */
  mesh->useClosure = useClosure;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseClosure"
/*@
  DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useClosure, 2);
  *useClosure = mesh->useClosure;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexSetAdjacencyUseAnchors"
/*@
  DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

  Input Parameters:
+ dm         - The DM object
- useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.
  Level: intermediate

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Flag is consumed by DMPlexGetAdjacency() via DMPlexGetAdjacency_Internal() */
  mesh->useAnchors = useAnchors;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacencyUseAnchors"
/*@
  DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useAnchors, 2);
  *useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency_Cone_Internal"
/* FVM adjacency: collect support(p + cone(p)), i.e. every point whose support contains p
   or one of p's cone points.  adj[] must be preallocated with capacity *adjSize; on return
   *adjSize is the number of unique adjacent points found. */
static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  /* c == 0 visits p itself; c > 0 visits cone(p)[c-1] */
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt  point   = !c ? p : cone[c-1];
    const PetscInt *support = NULL;
    PetscInt        supportSize, s, q;

    ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr);
    ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr);
    for (s = 0; s < supportSize; ++s) {
      /* Linear membership test with append-on-miss: if support[s] is not among
         adj[0..numAdj), the second loop condition appends it (comma operator
         yields 0 so the loop still terminates) */
      for (q = 0; q < numAdj || (adj[numAdj++] = support[s],0); ++q) {
        if (support[s] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency_Support_Internal"
/* Dual of the Cone variant: collect cone(p + support(p)).  Same capacity contract on adj[]. */
static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *support = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr);
  ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr);
  /* s == 0 visits p itself; s > 0 visits support(p)[s-1] */
  for (s = 0; s <= supportSize; ++s) {
    const PetscInt  point = !s ? p : support[s-1];
    const PetscInt *cone  = NULL;
    PetscInt        coneSize, c, q;

    ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr);
    ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr);
    for (c = 0; c < coneSize; ++c) {
      /* Membership test with append-on-miss; see Cone variant */
      for (q = 0; q < numAdj || (adj[numAdj++] = cone[c],0); ++q) {
        if (cone[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency_Transitive_Internal"
/* FEM adjacency: closure(star(p)) when useClosure is PETSC_FALSE, star(closure(p)) when
   PETSC_TRUE (the flag selects the direction of the OUTER traversal; the inner traversal
   uses the opposite direction).  Closures are returned as (point, orientation) pairs,
   hence the stride-2 loops over starSize*2/closureSize*2. */
static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
{
  PetscInt      *star = NULL;
  PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  for (s = 0; s < starSize*2; s += 2) {
    const PetscInt *closure = NULL;
    PetscInt        closureSize, c, q;

    ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
    for (c = 0; c < closureSize*2; c += 2) {
      /* Membership test with append-on-miss; see Cone variant */
      for (q = 0; q < numAdj || (adj[numAdj++] = closure[c],0); ++q) {
        if (closure[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
    ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
  }
  ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency_Internal"
/* Dispatch to the Cone/Support/Transitive adjacency kernels, optionally allocating adj[]
   (when *adj is NULL) and optionally substituting anchor points for constrained points
   (useAnchors).  See DMPlexGetAdjacency() for the user-facing contract. */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p,
PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): function-local static caches the size of the most recent allocation;
     it is not thread-safe, and if a caller passes its own adj with *adjSize < 0 before
     any allocating call has run, asiz is still 0 — verify callers always pass either
     *adj == NULL or a non-negative *adjSize. */
  static PetscInt asiz = 0;
  PetscInt maxAnchors = 1;
  PetscInt aStart = -1, aEnd = -1;
  PetscInt maxAdjSize;
  PetscSection aSec = NULL;
  IS aIS = NULL;
  const PetscInt *anchors;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  if (useAnchors) {
    ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr);
    if (aSec) {
      ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr);
      maxAnchors = PetscMax(1,maxAnchors);
      ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr);
      ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr);
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    /* Worst-case bound on the adjacency size: geometric series over the cone/support
       fan-out through all depth levels, scaled by the max anchors per point, and
       clamped to the total number of mesh points */
    ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr);
    ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
    ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr);
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz  = PetscMin(asiz,pEnd-pStart);
    ierr  = PetscMalloc1(asiz,adj);CHKERRQ(ierr);
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* Select the adjacency pattern; see the Notes in DMPlexSetAdjacencyUseCone() */
  if (useTransitiveClosure) {
    ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr);
  } else if (useCone) {
    ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  } else {
    ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  }
  if (useAnchors && aSec) {
    /* Replace each constrained (anchored) point in the adjacency list by its anchor
       points: remove the point by shifting the tail left, then append the anchors
       that are not already present */
    PetscInt origSize = *adjSize;
    PetscInt numAdj = origSize;
    PetscInt i = 0, j;
    PetscInt *orig = *adj;

    while (i < origSize) {
      PetscInt p = orig[i]; /* shadows the function argument p intentionally scoped to this loop */
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr);
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Remove the constrained point; do not advance i since a new point slid into slot i */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr);
        for (s = 0; s < aDof; ++s) {
          /* Membership test with append-on-miss, as in the adjacency kernels above */
          for (q = 0; q < numAdj || (orig[numAdj++] = anchors[aOff+s],0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
        }
      }
      else {
        i++;
      }
    }
    *adjSize = numAdj;
    ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexGetAdjacency"
/*@
  DMPlexGetAdjacency - Return all points adjacent to the given point

  Input Parameters:
+ dm - The DM object
. p  - The point
.
adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
- adj - Either NULL so that the array is allocated, or an existing array with size adjSize

  Output Parameters:
+ adjSize - The number of adjacent points
- adj - The adjacent points

  Level: advanced

  Notes: The user must PetscFree the adj array if it was not passed in.

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
{
  DM_Plex       *mesh = (DM_Plex *) dm->data;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidPointer(adjSize,3);
  PetscValidPointer(adj,4);
  /* Adjacency pattern is controlled by the DMPlexSetAdjacencyUse*() flags on the mesh */
  ierr = DMPlexGetAdjacency_Internal(dm, p, mesh->useCone, mesh->useClosure, mesh->useAnchors, adjSize, adj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateTwoSidedProcessSF"
/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on DM

  Input Parameters:
+ dm              - The DM
. sfPoint         - The PetscSF which encodes point connectivity
. rootRankSection - Section giving the number of leaf ranks per root point
. rootRanks       - The leaf ranks for each root point
. leafRankSection - Section giving the number of sharing ranks per leaf point
- leafRanks       - The sharing ranks for each leaf point

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess    - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: PetscSFCreate(), DMPlexCreateProcessSF()
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors; /* bit per rank: set if that rank shares any point with us */
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        numProcs, proc, rank;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) {PetscValidPointer(processRanks, 3);}
  if (sfProcess)    {PetscValidPointer(sfProcess, 4);}
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &numProcs);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr);
  ierr = PetscBTCreate(numProcs, &neighbors);CHKERRQ(ierr);
  ierr = PetscBTMemzero(numProcs, neighbors);CHKERRQ(ierr);
  /* Compute root-to-leaf process connectivity */
  ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-neighbor process connectivity */
  ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges: a process is never its own neighbor */
  PetscBTClear(neighbors, rank);
  for (proc = 0, numNeighbors = 0; proc < numProcs; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr);
  for (proc = 0, n = 0; proc < numProcs; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      /* Leaf "proc" on this rank points at root "rank" on process proc */
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr);
  if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);}
  else              {ierr = PetscFree(ranksNew);CHKERRQ(ierr);}
  if (sfProcess) {
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(*sfProcess, numProcs, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeOwnership"
/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on DM

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank    - The rank of each edge into the root point
.
leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Level: developer

.seealso: DMPlexCreateOverlap()
@*/
PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
{
  MPI_Comm        comm;
  PetscSF         sfPoint;
  const PetscInt *rootdegree;
  PetscInt       *myrank, *remoterank;
  PetscInt        pStart, pEnd, p, nedges;
  PetscMPIInt     rank;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  /* Compute number of leaves for each root */
  ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr);
  ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);}
  ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr);
  /* Gather rank of each leaf to root: every process contributes its own rank for each leaf */
  ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr);
  ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr);
  ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr);
  for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank;
  ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr);
  ierr = PetscFree(myrank);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr);
  /* Distribute remote ranks to leaves */
  ierr = PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr);
  ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlap"
/*@C
  DMPlexCreateOverlap - Compute a label marking the points contributed to the overlap of each remote process, using the ownership information from DMPlexDistributeOwnership().

  Collective on DM

  Input Parameters:
+ dm - The DM
. levels - Number of overlap levels
. rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
. leafSection - The number of processes sharing a given leaf point
- leafrank - The rank of each process sharing a leaf point

  Output Parameter:
. ovLabel - DMLabel containing remote overlap contributions as point/rank pairings

  Level: developer

.seealso: DMPlexDistributeOwnership(), DMPlexDistribute()
@*/
PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  MPI_Comm           comm;
  DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
  PetscSF            sfPoint, sfProc;
  const PetscSFNode *remote;
  const PetscInt    *local;
  const PetscInt    *nrank, *rrank;
  PetscInt          *adj = NULL; /* Allocated by the first DMPlexGetAdjacency() call and reused */
  PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
  PetscMPIInt        rank, numProcs;
  PetscBool          useCone, useClosure, flg;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);CHKERRQ(ierr);
  ierr = DMLabelCreate("Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr);
  /* Handle leaves: shared with the root point */
  for (l = 0; l < nleaves; ++l) {
    PetscInt adjSize = PETSC_DETERMINE, a;

    ierr = DMPlexGetAdjacency(dm, local[l], &adjSize, &adj);CHKERRQ(ierr);
    for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);}
  }
  ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Handle roots */
  for (p = pStart; p < pEnd; ++p) {
    PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

    if ((p >= sStart) && (p < sEnd)) {
      /* Some leaves share a root with other leaves on different processes */
      ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr);
      if (neighbors) {
        ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr);
        ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
        for (n = 0; n < neighbors; ++n) {
          const PetscInt remoteRank = nrank[noff+n];

          if (remoteRank == rank) continue;
          for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
        }
      }
    }
    /* Roots are shared with leaves */
    ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr);
    if (!neighbors) continue;
    ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr);
    for (n = 0; n < neighbors; ++n) {
      const PetscInt remoteRank = rrank[noff+n];

      if (remoteRank == rank) continue;
      for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(adj);CHKERRQ(ierr);
  ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr);
  ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr);
  /* Add additional overlap levels */
  for (l = 1; l < levels; l++) {ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr);}
  /* We require the closure in the overlap */
  ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
  if (useCone || !useClosure) {
    ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr);
  }
  ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);CHKERRQ(ierr);
    ierr = DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }
  /* Make process SF and invert sender to receiver label */
  ierr = DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, NULL, &sfProc);CHKERRQ(ierr);
  ierr = DMLabelCreate("Overlap label", ovLabel);CHKERRQ(ierr);
  ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);CHKERRQ(ierr);
  /* Add owned points, except for shared local points */
  for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);}
  for (l = 0; l < nleaves; ++l) {
    ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr);
    ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr);
  }
  /* Clean up */
  ierr = DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sfProc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexCreateOverlapMigrationSF"
/* Build the migration SF that maps existing local points to themselves (stratified by depth)
   and appends the incoming overlap points received through overlapSF. */
PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  PetscSFNode       *iremote;
  PetscSF            pointSF;
  const PetscInt    *sharedLocal;
  const PetscSFNode *overlapRemote, *sharedRemote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) pointDepths[p] = d;
  }
  for (p=0; p<nleaves; p++) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);

  /* Count received points in each stratum and compute the internal strata shift.
     The shift re-creates the DMPlex point ordering (cells, vertices, faces, edges)
     with the received points appended to each stratum.
     NOTE(review): remoteDepths entries are initialized to -1 and used as indices
     below; this assumes every leaf receives a valid depth from the broadcast. */
  ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) depthRecv[d]=0;
  for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++;
  depthShift[dim] = 0;
  for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim];
  for (d=1; d<dim; d++) depthShift[d] += depthRecv[0];
  for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1];
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    depthIdx[d] = pStart + depthShift[d];
  }

  /* Form the overlap SF build an SF that describes the full overlap migration SF */
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  newLeaves = pEnd - pStart + nleaves;
  ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr);
  ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr);
  /* First map local points to themselves */
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=pStart; p<pEnd; p++) {
      point = p + depthShift[d];
      ilocal[point] = point;
      iremote[point].index = p;
      iremote[point].rank = rank;
      depthIdx[d]++;
    }
  }

  /* Add in the remote roots for currently shared points */
  ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr);
  for (d=0; d<dim+1; d++) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p=0; p<numSharedPoints; p++) {
      if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
        point = sharedLocal[p] + depthShift[d];
        iremote[point].index = sharedRemote[p].index;
        iremote[point].rank = sharedRemote[p].rank;
      }
    }
  }

  /* Now add the incoming overlap points, appended after each stratum's local points */
  for (p=0; p<nleaves; p++) {
    point = depthIdx[remoteDepths[p]];
    ilocal[point] = point;
    iremote[point].index = overlapRemote[p].index;
    iremote[point].rank = overlapRemote[p].rank;
    depthIdx[remoteDepths[p]]++;
  }
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr);

  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexStratifyMigrationSF"
/*@
  DMPlexStratifyMigrationSF - Rearrange the leaves of a point migration SF so that the receiving DM is stratified, i.e. its points are grouped by depth

  Input Parameter:
+ dm - The DM
- sf - A star forest with non-ordered leaves, usually defining a DM point migration

  Output Parameter:
. migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

  Level: developer

.seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap()
@*/
PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
{
  MPI_Comm           comm;
  PetscMPIInt        rank, numProcs;
  PetscInt           d, ldepth, depth, p, pStart, pEnd, nroots, nleaves;
  PetscInt          *pointDepths, *remoteDepths, *ilocal;
  PetscInt          *depthRecv, *depthShift, *depthIdx;
  const PetscSFNode *iremote;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr);
  ierr = MPI_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
  /* Interpolation must be uniform across ranks (empty ranks, ldepth < 0, are exempt) */
  if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth);

  /* Before building the migration SF we need to know the new stratum offsets */
  ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr);
  ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr);
  for (d = 0; d < depth+1; ++d) {
    ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) pointDepths[p] = d;
  }
  for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1;
  ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr);
  /* Count received points in each stratum and compute the internal strata shift
     (same ordering convention as DMPlexCreateOverlapMigrationSF above) */
  ierr = PetscMalloc3(depth+1, &depthRecv, depth+1, &depthShift, depth+1, &depthIdx);CHKERRQ(ierr);
  for (d = 0; d < depth+1; ++d) depthRecv[d] = 0;
  for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++;
  depthShift[depth] = 0;
  for (d = 0; d < depth; ++d) depthShift[d] = depthRecv[depth];
  for (d = 1; d < depth; ++d) depthShift[d] += depthRecv[0];
  for (d = depth-2; d > 0; --d) depthShift[d] += depthRecv[d+1];
  for (d = 0; d < depth+1; ++d) {depthIdx[d] = 0;}
  /* Derive a new local permutation based on stratified indices */
  ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr);
  for (p = 0; p < nleaves; ++p) {
    const PetscInt dep = remoteDepths[p];

    ilocal[p] = depthShift[dep] + depthIdx[dep];
    depthIdx[dep]++;
  }
  ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr);
  ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr);
  ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr);
  ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr);
  ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeField"
/*@
  DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

  Collective on DM

  Input Parameters:
+ dm - The DMPlex object
. pointSF - The PetscSF describing the communication pattern
. originalSection - The PetscSection for existing data layout
- originalVec - The existing data

  Output Parameters:
+ newSection - The PetscSF describing the new data layout
- newVec - The new data

  Level: developer

.seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData()
@*/
PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
{
  PetscSF        fieldSF;
  PetscInt      *remoteOffsets, fieldSize;
  PetscScalar   *originalValues, *newValues;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  /* Migrate the section layout first; remoteOffsets records where each remote dof lives */
  ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr);

  ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr);
  ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr);

  /* Expand the point SF to a dof-level SF and broadcast the values */
  ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr);
  ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr);
  ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMPlexDistributeFieldIS"
/*@
  DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from
mesh distribution 859 860 Collective on DM 861 862 Input Parameters: 863 + dm - The DMPlex object 864 . pointSF - The PetscSF describing the communication pattern 865 . originalSection - The PetscSection for existing data layout 866 - originalIS - The existing data 867 868 Output Parameters: 869 + newSection - The PetscSF describing the new data layout 870 - newIS - The new data 871 872 Level: developer 873 874 .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData() 875 @*/ 876 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS) 877 { 878 PetscSF fieldSF; 879 PetscInt *newValues, *remoteOffsets, fieldSize; 880 const PetscInt *originalValues; 881 PetscErrorCode ierr; 882 883 PetscFunctionBegin; 884 ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 885 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 886 887 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 888 ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr); 889 890 ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr); 891 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 892 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 893 ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr); 894 ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr); 895 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 896 ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr); 897 ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr); 898 ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 899 PetscFunctionReturn(0); 900 } 901 902 #undef __FUNCT__ 903 #define __FUNCT__ 
"DMPlexDistributeData" 904 /*@ 905 DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 906 907 Collective on DM 908 909 Input Parameters: 910 + dm - The DMPlex object 911 . pointSF - The PetscSF describing the communication pattern 912 . originalSection - The PetscSection for existing data layout 913 . datatype - The type of data 914 - originalData - The existing data 915 916 Output Parameters: 917 + newSection - The PetscSection describing the new data layout 918 - newData - The new data 919 920 Level: developer 921 922 .seealso: DMPlexDistribute(), DMPlexDistributeField() 923 @*/ 924 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData) 925 { 926 PetscSF fieldSF; 927 PetscInt *remoteOffsets, fieldSize; 928 PetscMPIInt dataSize; 929 PetscErrorCode ierr; 930 931 PetscFunctionBegin; 932 ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 933 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 934 935 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 936 ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr); 937 ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr); 938 939 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 940 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 941 ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 942 ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 943 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 944 ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 945 PetscFunctionReturn(0); 946 } 947 948 #undef __FUNCT__ 949 #define __FUNCT__ "DMPlexDistributeCones" 950 PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF 
migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 951 { 952 DM_Plex *mesh = (DM_Plex*) dm->data; 953 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 954 MPI_Comm comm; 955 PetscSF coneSF; 956 PetscSection originalConeSection, newConeSection; 957 PetscInt *remoteOffsets, *cones, *newCones, newConesSize; 958 PetscBool flg; 959 PetscErrorCode ierr; 960 961 PetscFunctionBegin; 962 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 963 PetscValidPointer(dmParallel,4); 964 ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr); 965 966 /* Distribute cone section */ 967 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 968 ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr); 969 ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr); 970 ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr); 971 ierr = DMSetUp(dmParallel);CHKERRQ(ierr); 972 { 973 PetscInt pStart, pEnd, p; 974 975 ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr); 976 for (p = pStart; p < pEnd; ++p) { 977 PetscInt coneSize; 978 ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr); 979 pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize); 980 } 981 } 982 /* Communicate and renumber cones */ 983 ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr); 984 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 985 ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr); 986 ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr); 987 ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 988 ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 989 ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr); 990 ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, 
newCones);CHKERRQ(ierr); 991 #if PETSC_USE_DEBUG 992 { 993 PetscInt p; 994 PetscBool valid = PETSC_TRUE; 995 for (p = 0; p < newConesSize; ++p) { 996 if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);} 997 } 998 if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map"); 999 } 1000 #endif 1001 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr); 1002 if (flg) { 1003 ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr); 1004 ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1005 ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr); 1006 ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1007 ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr); 1008 } 1009 ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr); 1010 ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr); 1011 ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 1012 ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr); 1013 ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr); 1014 ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr); 1015 /* Create supports and stratify sieve */ 1016 { 1017 PetscInt pStart, pEnd; 1018 1019 ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr); 1020 ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr); 1021 } 1022 ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr); 1023 ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr); 1024 pmesh->useCone = mesh->useCone; 1025 pmesh->useClosure = mesh->useClosure; 1026 PetscFunctionReturn(0); 1027 } 1028 1029 #undef __FUNCT__ 1030 #define __FUNCT__ "DMPlexDistributeCoordinates" 1031 PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel) 1032 { 1033 
MPI_Comm comm; 1034 PetscSection originalCoordSection, newCoordSection; 1035 Vec originalCoordinates, newCoordinates; 1036 PetscInt bs; 1037 const char *name; 1038 const PetscReal *maxCell, *L; 1039 const DMBoundaryType *bd; 1040 PetscErrorCode ierr; 1041 1042 PetscFunctionBegin; 1043 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1044 PetscValidPointer(dmParallel, 3); 1045 1046 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1047 ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr); 1048 ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr); 1049 ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr); 1050 if (originalCoordinates) { 1051 ierr = VecCreate(comm, &newCoordinates);CHKERRQ(ierr); 1052 ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr); 1053 ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr); 1054 1055 ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr); 1056 ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr); 1057 ierr = VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr); 1058 ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr); 1059 ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr); 1060 } 1061 ierr = DMGetPeriodicity(dm, &maxCell, &L, &bd);CHKERRQ(ierr); 1062 if (L) {ierr = DMSetPeriodicity(dmParallel, maxCell, L, bd);CHKERRQ(ierr);} 1063 PetscFunctionReturn(0); 1064 } 1065 1066 #undef __FUNCT__ 1067 #define __FUNCT__ "DMPlexDistributeLabels" 1068 /* Here we are assuming that process 0 always has everything */ 1069 PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel) 1070 { 1071 MPI_Comm comm; 1072 PetscMPIInt rank; 1073 PetscInt numLabels, numLocalLabels, l; 1074 PetscBool hasLabels = PETSC_FALSE; 1075 PetscErrorCode ierr; 1076 1077 PetscFunctionBegin; 1078 PetscValidHeaderSpecific(dm, 
DM_CLASSID, 1); 1079 PetscValidHeaderSpecific(dm, DM_CLASSID, 3); 1080 ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr); 1081 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1082 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1083 1084 /* Everyone must have either the same number of labels, or none */ 1085 ierr = DMPlexGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr); 1086 numLabels = numLocalLabels; 1087 ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr); 1088 if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE; 1089 for (l = numLabels-1; l >= 0; --l) { 1090 DMLabel label = NULL, labelNew = NULL; 1091 PetscBool isdepth; 1092 1093 if (hasLabels) { 1094 ierr = DMPlexGetLabelByNum(dm, l, &label);CHKERRQ(ierr); 1095 /* Skip "depth" because it is recreated */ 1096 ierr = PetscStrcmp(label->name, "depth", &isdepth);CHKERRQ(ierr); 1097 } 1098 ierr = MPI_Bcast(&isdepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr); 1099 if (isdepth) continue; 1100 ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr); 1101 ierr = DMPlexAddLabel(dmParallel, labelNew);CHKERRQ(ierr); 1102 } 1103 ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr); 1104 PetscFunctionReturn(0); 1105 } 1106 1107 #undef __FUNCT__ 1108 #define __FUNCT__ "DMPlexDistributeSetupHybrid" 1109 PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 1110 { 1111 DM_Plex *mesh = (DM_Plex*) dm->data; 1112 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1113 MPI_Comm comm; 1114 const PetscInt *gpoints; 1115 PetscInt dim, depth, n, d; 1116 PetscErrorCode ierr; 1117 1118 PetscFunctionBegin; 1119 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1120 PetscValidPointer(dmParallel, 4); 1121 1122 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1123 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1124 1125 /* Setup hybrid structure */ 1126 for (d = 0; d <= dim; ++d) 
{pmesh->hybridPointMax[d] = mesh->hybridPointMax[d];} 1127 ierr = MPI_Bcast(pmesh->hybridPointMax, dim+1, MPIU_INT, 0, comm);CHKERRQ(ierr); 1128 ierr = ISLocalToGlobalMappingGetSize(renumbering, &n);CHKERRQ(ierr); 1129 ierr = ISLocalToGlobalMappingGetIndices(renumbering, &gpoints);CHKERRQ(ierr); 1130 ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr); 1131 for (d = 0; d <= dim; ++d) { 1132 PetscInt pmax = pmesh->hybridPointMax[d], newmax = 0, pEnd, stratum[2], p; 1133 1134 if (pmax < 0) continue; 1135 ierr = DMPlexGetDepthStratum(dm, d > depth ? depth : d, &stratum[0], &stratum[1]);CHKERRQ(ierr); 1136 ierr = DMPlexGetDepthStratum(dmParallel, d, NULL, &pEnd);CHKERRQ(ierr); 1137 ierr = MPI_Bcast(stratum, 2, MPIU_INT, 0, comm);CHKERRQ(ierr); 1138 for (p = 0; p < n; ++p) { 1139 const PetscInt point = gpoints[p]; 1140 1141 if ((point >= stratum[0]) && (point < stratum[1]) && (point >= pmax)) ++newmax; 1142 } 1143 if (newmax > 0) pmesh->hybridPointMax[d] = pEnd - newmax; 1144 else pmesh->hybridPointMax[d] = -1; 1145 } 1146 ierr = ISLocalToGlobalMappingRestoreIndices(renumbering, &gpoints);CHKERRQ(ierr); 1147 PetscFunctionReturn(0); 1148 } 1149 1150 #undef __FUNCT__ 1151 #define __FUNCT__ "DMPlexDistributeSetupTree" 1152 PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel) 1153 { 1154 DM_Plex *mesh = (DM_Plex*) dm->data; 1155 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1156 MPI_Comm comm; 1157 DM refTree; 1158 PetscSection origParentSection, newParentSection; 1159 PetscInt *origParents, *origChildIDs; 1160 PetscBool flg; 1161 PetscErrorCode ierr; 1162 1163 PetscFunctionBegin; 1164 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1165 PetscValidHeaderSpecific(dm, DM_CLASSID, 4); 1166 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1167 1168 /* Set up tree */ 1169 ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr); 1170 ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr); 1171 
ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr); 1172 if (origParentSection) { 1173 PetscInt pStart, pEnd; 1174 PetscInt *newParents, *newChildIDs; 1175 PetscInt *remoteOffsetsParents, newParentSize; 1176 PetscSF parentSF; 1177 1178 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 1179 ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr); 1180 ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr); 1181 ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr); 1182 ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr); 1183 ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr); 1184 ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr); 1185 ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr); 1186 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origParents, newParents);CHKERRQ(ierr); 1187 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origParents, newParents);CHKERRQ(ierr); 1188 ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1189 ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr); 1190 ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr); 1191 #if PETSC_USE_DEBUG 1192 { 1193 PetscInt p; 1194 PetscBool valid = PETSC_TRUE; 1195 for (p = 0; p < newParentSize; ++p) { 1196 if (newParents[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);} 1197 } 1198 if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map"); 1199 } 1200 #endif 1201 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-parents_view", 
&flg);CHKERRQ(ierr); 1202 if (flg) { 1203 ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr); 1204 ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1205 ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr); 1206 ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1207 ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr); 1208 } 1209 ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr); 1210 ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr); 1211 ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr); 1212 ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr); 1213 } 1214 pmesh->useAnchors = mesh->useAnchors; 1215 PetscFunctionReturn(0); 1216 } 1217 1218 #undef __FUNCT__ 1219 #define __FUNCT__ "DMPlexDistributeSF" 1220 PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel) 1221 { 1222 DM_Plex *mesh = (DM_Plex*) dm->data; 1223 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 1224 PetscMPIInt rank, numProcs; 1225 MPI_Comm comm; 1226 PetscErrorCode ierr; 1227 1228 PetscFunctionBegin; 1229 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1230 PetscValidPointer(dmParallel,7); 1231 1232 /* Create point SF for parallel mesh */ 1233 ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1234 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 1235 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1236 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 1237 { 1238 const PetscInt *leaves; 1239 PetscSFNode *remotePoints, *rowners, *lowners; 1240 PetscInt numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints; 1241 PetscInt pStart, pEnd; 1242 1243 ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr); 1244 ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr); 1245 ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr); 1246 for 
(p=0; p<numRoots; p++) { 1247 rowners[p].rank = -1; 1248 rowners[p].index = -1; 1249 } 1250 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1251 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1252 for (p = 0; p < numLeaves; ++p) { 1253 if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */ 1254 lowners[p].rank = rank; 1255 lowners[p].index = leaves ? leaves[p] : p; 1256 } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */ 1257 lowners[p].rank = -2; 1258 lowners[p].index = -2; 1259 } 1260 } 1261 for (p=0; p<numRoots; p++) { /* Root must not participate in the rediction, flag so that MAXLOC does not use */ 1262 rowners[p].rank = -3; 1263 rowners[p].index = -3; 1264 } 1265 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1266 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1267 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1268 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1269 for (p = 0; p < numLeaves; ++p) { 1270 if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed"); 1271 if (lowners[p].rank != rank) ++numGhostPoints; 1272 } 1273 ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr); 1274 ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr); 1275 for (p = 0, gp = 0; p < numLeaves; ++p) { 1276 if (lowners[p].rank != rank) { 1277 ghostPoints[gp] = leaves ? 
leaves[p] : p; 1278 remotePoints[gp].rank = lowners[p].rank; 1279 remotePoints[gp].index = lowners[p].index; 1280 ++gp; 1281 } 1282 } 1283 ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr); 1284 ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr); 1285 ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr); 1286 } 1287 pmesh->useCone = mesh->useCone; 1288 pmesh->useClosure = mesh->useClosure; 1289 ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1290 PetscFunctionReturn(0); 1291 } 1292 1293 #undef __FUNCT__ 1294 #define __FUNCT__ "DMPlexCreatePointSF" 1295 /*@C 1296 DMPlexDerivePointSF - Build a point SF from an SF describing a point migration 1297 1298 Input Parameter: 1299 + dm - The source DMPlex object 1300 . migrationSF - The star forest that describes the parallel point remapping 1301 . ownership - Flag causing a vote to determine point ownership 1302 1303 Output Parameter: 1304 - pointSF - The star forest describing the point overlap in the remapped DM 1305 1306 Level: developer 1307 1308 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1309 @*/ 1310 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF) 1311 { 1312 PetscMPIInt rank; 1313 PetscInt p, nroots, nleaves, idx, npointLeaves; 1314 PetscInt *pointLocal; 1315 const PetscInt *leaves; 1316 const PetscSFNode *roots; 1317 PetscSFNode *rootNodes, *leafNodes, *pointRemote; 1318 PetscErrorCode ierr; 1319 1320 PetscFunctionBegin; 1321 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1322 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 1323 1324 ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr); 1325 ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr); 1326 if (ownership) { 1327 /* Point ownership vote: Process with highest rank ownes shared points */ 
1328 for (p = 0; p < nleaves; ++p) { 1329 /* Either put in a bid or we know we own it */ 1330 leafNodes[p].rank = rank; 1331 leafNodes[p].index = p; 1332 } 1333 for (p = 0; p < nroots; p++) { 1334 /* Root must not participate in the reduction, flag so that MAXLOC does not use */ 1335 rootNodes[p].rank = -3; 1336 rootNodes[p].index = -3; 1337 } 1338 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1339 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1340 } else { 1341 for (p = 0; p < nroots; p++) { 1342 rootNodes[p].index = -1; 1343 rootNodes[p].rank = rank; 1344 }; 1345 for (p = 0; p < nleaves; p++) { 1346 /* Write new local id into old location */ 1347 if (roots[p].rank == rank) { 1348 rootNodes[roots[p].index].index = leaves[p]; 1349 } 1350 } 1351 } 1352 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1353 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1354 1355 for (npointLeaves = 0, p = 0; p < nleaves; p++) {if (leafNodes[p].rank != rank) npointLeaves++;} 1356 ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr); 1357 ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr); 1358 for (idx = 0, p = 0; p < nleaves; p++) { 1359 if (leafNodes[p].rank != rank) { 1360 pointLocal[idx] = p; 1361 pointRemote[idx] = leafNodes[p]; 1362 idx++; 1363 } 1364 } 1365 ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr); 1366 ierr = PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr); 1367 ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1368 ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr); 1369 PetscFunctionReturn(0); 1370 } 1371 1372 #undef __FUNCT__ 1373 #define __FUNCT__ "DMPlexMigrate" 1374 /*@C 1375 DMPlexMigrate - Migrates internal DM data over the supplied star forest 1376 1377 
Input Parameter: 1378 + dm - The source DMPlex object 1379 . sf - The star forest communication context describing the migration pattern 1380 1381 Output Parameter: 1382 - targetDM - The target DMPlex object 1383 1384 Level: intermediate 1385 1386 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1387 @*/ 1388 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) 1389 { 1390 MPI_Comm comm; 1391 PetscInt dim, nroots; 1392 PetscSF sfPoint; 1393 ISLocalToGlobalMapping ltogMigration; 1394 PetscBool flg; 1395 PetscErrorCode ierr; 1396 1397 PetscFunctionBegin; 1398 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1399 ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1400 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 1401 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1402 ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr); 1403 1404 /* Check for a one-to-all distribution pattern */ 1405 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 1406 ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr); 1407 if (nroots >= 0) { 1408 IS isOriginal; 1409 ISLocalToGlobalMapping ltogOriginal; 1410 PetscInt n, size, nleaves, conesSize; 1411 PetscInt *numbering_orig, *numbering_new, *cones; 1412 PetscSection coneSection; 1413 /* Get the original point numbering */ 1414 ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr); 1415 ierr = ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);CHKERRQ(ierr); 1416 ierr = ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr); 1417 ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1418 /* Convert to positive global numbers */ 1419 for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);} 1420 /* Derive the new local-to-global mapping from the old one */ 1421 ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1422 ierr = PetscMalloc1(nleaves, 
&numbering_new);CHKERRQ(ierr); 1423 ierr = PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1424 ierr = PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1425 ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);CHKERRQ(ierr); 1426 ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1427 /* Convert cones to global numbering before migrating them */ 1428 ierr = DMPlexGetConeSection(dm, &coneSection);CHKERRQ(ierr); 1429 ierr = PetscSectionGetStorageSize(coneSection, &conesSize);CHKERRQ(ierr); 1430 ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr); 1431 ierr = ISLocalToGlobalMappingApplyBlock(ltogOriginal, conesSize, cones, cones);CHKERRQ(ierr); 1432 ierr = ISDestroy(&isOriginal);CHKERRQ(ierr); 1433 ierr = ISLocalToGlobalMappingDestroy(<ogOriginal);CHKERRQ(ierr); 1434 } else { 1435 /* One-to-all distribution pattern: We can derive LToG from SF */ 1436 ierr = ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);CHKERRQ(ierr); 1437 } 1438 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1439 if (flg) { 1440 ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr); 1441 ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr); 1442 } 1443 /* Migrate DM data to target DM */ 1444 ierr = DMPlexDistributeCones(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1445 ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr); 1446 ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr); 1447 ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1448 ierr = DMPlexDistributeSetupTree(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1449 ierr = ISLocalToGlobalMappingDestroy(<ogMigration);CHKERRQ(ierr); 1450 ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 
0);CHKERRQ(ierr); 1451 PetscFunctionReturn(0); 1452 } 1453 1454 #undef __FUNCT__ 1455 #define __FUNCT__ "DMPlexDistribute" 1456 /*@C 1457 DMPlexDistribute - Distributes the mesh and any associated sections. 1458 1459 Not Collective 1460 1461 Input Parameter: 1462 + dm - The original DMPlex object 1463 - overlap - The overlap of partitions, 0 is the default 1464 1465 Output Parameter: 1466 + sf - The PetscSF used for point distribution 1467 - parallelMesh - The distributed DMPlex object, or NULL 1468 1469 Note: If the mesh was not distributed, the return value is NULL. 1470 1471 The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and 1472 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1473 representation on the mesh. 1474 1475 Level: intermediate 1476 1477 .keywords: mesh, elements 1478 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1479 @*/ 1480 PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) 1481 { 1482 MPI_Comm comm; 1483 PetscPartitioner partitioner; 1484 IS cellPart; 1485 PetscSection cellPartSection; 1486 DM dmCoord; 1487 DMLabel lblPartition, lblMigration; 1488 PetscSF sfProcess, sfMigration, sfStratified, sfPoint; 1489 PetscBool flg; 1490 PetscMPIInt rank, numProcs, p; 1491 PetscErrorCode ierr; 1492 1493 PetscFunctionBegin; 1494 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1495 if (sf) PetscValidPointer(sf,4); 1496 PetscValidPointer(dmParallel,5); 1497 1498 ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1499 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1500 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1501 ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr); 1502 1503 *dmParallel = NULL; 1504 if (numProcs == 1) PetscFunctionReturn(0); 1505 1506 /* Create cell partition */ 1507 ierr = 
PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1508 ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr); 1509 ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr); 1510 ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr); 1511 { 1512 /* Convert partition to DMLabel */ 1513 PetscInt proc, pStart, pEnd, npoints, poffset; 1514 const PetscInt *points; 1515 ierr = DMLabelCreate("Point Partition", &lblPartition);CHKERRQ(ierr); 1516 ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr); 1517 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1518 for (proc = pStart; proc < pEnd; proc++) { 1519 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1520 ierr = PetscSectionGetOffset(cellPartSection, proc, &poffset);CHKERRQ(ierr); 1521 for (p = poffset; p < poffset+npoints; p++) { 1522 ierr = DMLabelSetValue(lblPartition, points[p], proc);CHKERRQ(ierr); 1523 } 1524 } 1525 ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr); 1526 } 1527 ierr = DMPlexPartitionLabelClosure(dm, lblPartition);CHKERRQ(ierr); 1528 { 1529 /* Build a global process SF */ 1530 PetscSFNode *remoteProc; 1531 ierr = PetscMalloc1(numProcs, &remoteProc);CHKERRQ(ierr); 1532 for (p = 0; p < numProcs; ++p) { 1533 remoteProc[p].rank = p; 1534 remoteProc[p].index = rank; 1535 } 1536 ierr = PetscSFCreate(comm, &sfProcess);CHKERRQ(ierr); 1537 ierr = PetscObjectSetName((PetscObject) sfProcess, "Process SF");CHKERRQ(ierr); 1538 ierr = PetscSFSetGraph(sfProcess, numProcs, numProcs, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr); 1539 } 1540 ierr = DMLabelCreate("Point migration", &lblMigration);CHKERRQ(ierr); 1541 ierr = DMPlexPartitionLabelInvert(dm, lblPartition, sfProcess, lblMigration);CHKERRQ(ierr); 1542 ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr); 1543 /* Stratify the SF in case we are migrating an already parallel plex 
*/ 1544 ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr); 1545 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1546 sfMigration = sfStratified; 1547 ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1548 ierr = PetscOptionsHasName(((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1549 if (flg) { 1550 ierr = PetscViewerASCIISynchronizedAllow(PETSC_VIEWER_STDOUT_WORLD, PETSC_TRUE);CHKERRQ(ierr); 1551 ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1552 ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1553 } 1554 1555 /* Create non-overlapping parallel DM and migrate internal data */ 1556 ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr); 1557 ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr); 1558 ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr); 1559 1560 /* Build the point SF without overlap */ 1561 ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr); 1562 ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr); 1563 ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr); 1564 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1565 if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);} 1566 1567 if (overlap > 0) { 1568 DM dmOverlap; 1569 PetscInt nroots, nleaves; 1570 PetscSFNode *newRemote; 1571 const PetscSFNode *oldRemote; 1572 PetscSF sfOverlap, sfOverlapPoint; 1573 /* Add the partition overlap to the distributed DM */ 1574 ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr); 1575 ierr = DMDestroy(dmParallel);CHKERRQ(ierr); 1576 *dmParallel = dmOverlap; 1577 if (flg) { 1578 ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr); 1579 ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr); 1580 } 1581 1582 /* Re-map the migration SF to establish the full 
migration pattern */ 1583 ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr); 1584 ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1585 ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr); 1586 ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1587 ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1588 ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr); 1589 ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1590 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1591 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1592 sfMigration = sfOverlapPoint; 1593 } 1594 /* Cleanup Partition */ 1595 ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr); 1596 ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr); 1597 ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr); 1598 ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr); 1599 ierr = ISDestroy(&cellPart);CHKERRQ(ierr); 1600 /* Copy BC */ 1601 ierr = DMPlexCopyBoundary(dm, *dmParallel);CHKERRQ(ierr); 1602 /* Cleanup */ 1603 if (sf) {*sf = sfMigration;} 1604 else {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);} 1605 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1606 ierr = PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1607 PetscFunctionReturn(0); 1608 } 1609 1610 #undef __FUNCT__ 1611 #define __FUNCT__ "DMPlexDistributeOverlap" 1612 /*@C 1613 DMPlexDistribute - Add partition overlap to a distributed non-overlapping DM. 
1614 1615 Not Collective 1616 1617 Input Parameter: 1618 + dm - The non-overlapping distrbuted DMPlex object 1619 - overlap - The overlap of partitions, 0 is the default 1620 1621 Output Parameter: 1622 + sf - The PetscSF used for point distribution 1623 - dmOverlap - The overlapping distributed DMPlex object, or NULL 1624 1625 Note: If the mesh was not distributed, the return value is NULL. 1626 1627 The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and 1628 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1629 representation on the mesh. 1630 1631 Level: intermediate 1632 1633 .keywords: mesh, elements 1634 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1635 @*/ 1636 PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap) 1637 { 1638 MPI_Comm comm; 1639 PetscMPIInt rank; 1640 PetscSection rootSection, leafSection; 1641 IS rootrank, leafrank; 1642 DM dmCoord; 1643 DMLabel lblOverlap; 1644 PetscSF sfOverlap, sfStratified, sfPoint; 1645 PetscErrorCode ierr; 1646 1647 PetscFunctionBegin; 1648 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1649 if (sf) PetscValidPointer(sf, 3); 1650 PetscValidPointer(dmOverlap, 4); 1651 1652 ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1653 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1654 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1655 1656 /* Compute point overlap with neighbouring processes on the distributed DM */ 1657 ierr = PetscLogEventBegin(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1658 ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr); 1659 ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr); 1660 ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr); 1661 ierr = DMPlexCreateOverlap(dm, overlap, 
rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr); 1662 /* Convert overlap label to stratified migration SF */ 1663 ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr); 1664 ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr); 1665 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1666 sfOverlap = sfStratified; 1667 ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr); 1668 ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr); 1669 1670 ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr); 1671 ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr); 1672 ierr = ISDestroy(&rootrank);CHKERRQ(ierr); 1673 ierr = ISDestroy(&leafrank);CHKERRQ(ierr); 1674 ierr = PetscLogEventEnd(PETSCPARTITIONER_Partition,dm,0,0,0);CHKERRQ(ierr); 1675 1676 /* Build the overlapping DM */ 1677 ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr); 1678 ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr); 1679 ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr); 1680 /* Build the new point SF */ 1681 ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1682 ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr); 1683 ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr); 1684 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1685 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1686 /* Cleanup overlap partition */ 1687 ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr); 1688 if (sf) *sf = sfOverlap; 1689 else {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);} 1690 ierr = PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1691 PetscFunctionReturn(0); 1692 } 1693