#include <petsc/private/dmpleximpl.h> /*I "petscdmplex.h" I*/
#include <petsc/private/dmlabelimpl.h> /*I "petscdmlabel.h" I*/

/*@C
  DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  Input Parameters:
+ dm - The DM object
. user - The user callback, may be NULL (to clear the callback)
- ctx - context for callback evaluation, may be NULL

  Level: advanced

  Notes:
  The caller of DMPlexGetAdjacency may need to arrange that a large enough array is available for the adjacency.

  Any setting here overrides other configuration of DMPlex adjacency determination.

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexGetAdjacencyUser()
@*/
PetscErrorCode DMPlexSetAdjacencyUser(DM dm,PetscErrorCode (*user)(DM,PetscInt,PetscInt*,PetscInt[],void*),void *ctx)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* A NULL callback restores the built-in adjacency determination */
  mesh->useradjacency    = user;
  mesh->useradjacencyctx = ctx;
  PetscFunctionReturn(0);
}

/*@C
  DMPlexGetAdjacencyUser - get the user-defined adjacency callback

  Input Parameter:
. dm - The DM object

  Output Parameters:
+ user - The user callback
- ctx - context for callback evaluation

  Level: advanced

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexGetAdjacency(), DMPlexSetAdjacencyUser()
@*/
PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM,PetscInt,PetscInt*,PetscInt[],void*), void **ctx)
{
  DM_Plex *mesh = (DM_Plex *)dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  /* Both outputs are optional; only fill what the caller asked for */
  if (user) *user = mesh->useradjacency;
  if (ctx)  *ctx  = mesh->useradjacencyctx;
  PetscFunctionReturn(0);
}

/*@
  DMPlexSetAdjacencyUseCone - Define adjacency in the mesh using either the cone or the support first

  Input Parameters:
+ dm - The DM object
- useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseCone(DM dm, PetscBool useCone)
{
  PetscDS        prob;
  PetscBool      useClosure;
  PetscInt       Nf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* The adjacency flags are stored in the PetscDS: per-field when fields exist, or in the default slot */
  ierr = DMGetDS(dm, &prob);CHKERRQ(ierr);
  ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
  if (!Nf) {
    /* Preserve the current useClosure flag while overwriting useCone */
    ierr = PetscDSGetAdjacency(prob, PETSC_DEFAULT, NULL, &useClosure);CHKERRQ(ierr);
    ierr = PetscDSSetAdjacency(prob, PETSC_DEFAULT, useCone, useClosure);CHKERRQ(ierr);
  } else {
    /* NOTE(review): only field 0 is updated here; presumably field 0 governs mesh adjacency — confirm */
    ierr = PetscDSGetAdjacency(prob, 0, NULL, &useClosure);CHKERRQ(ierr);
    ierr = PetscDSSetAdjacency(prob, 0, useCone, useClosure);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacencyUseCone - Query whether adjacency in the mesh uses the cone or the support first

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useCone - Flag to use the cone first

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexGetAdjacencyUseClosure(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseCone(DM dm, PetscBool *useCone)
{
  PetscDS        prob;
  PetscInt       Nf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMGetDS(dm, &prob);CHKERRQ(ierr);
  ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
  /* Read from the default slot when there are no fields, else from field 0 (mirrors the setter) */
  if (!Nf) {
    ierr = PetscDSGetAdjacency(prob, PETSC_DEFAULT, useCone, NULL);CHKERRQ(ierr);
  } else {
    ierr = PetscDSGetAdjacency(prob, 0, useCone, NULL);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexSetAdjacencyUseClosure - Define adjacency in the mesh using the transitive closure

  Input Parameters:
+ dm - The DM object
- useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexSetAdjacencyUseClosure(DM dm, PetscBool useClosure)
{
  PetscDS        prob;
  PetscBool      useCone;
  PetscInt       Nf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMGetDS(dm, &prob);CHKERRQ(ierr);
  ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
  if (!Nf) {
    /* Preserve the current useCone flag while overwriting useClosure */
    ierr = PetscDSGetAdjacency(prob, PETSC_DEFAULT, &useCone, NULL);CHKERRQ(ierr);
    ierr = PetscDSSetAdjacency(prob, PETSC_DEFAULT, useCone, useClosure);CHKERRQ(ierr);
  } else {
    ierr = PetscDSGetAdjacency(prob, 0, &useCone, NULL);CHKERRQ(ierr);
    ierr = PetscDSSetAdjacency(prob, 0, useCone, useClosure);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacencyUseClosure - Query whether adjacency in the mesh uses the transitive closure

  Input Parameter:
. dm - The DM object

  Output Parameter:
.
useClosure - Flag to use the closure

  Level: intermediate

  Notes:
$     FEM:   Two points p and q are adjacent if q \in closure(star(p)),   useCone = PETSC_FALSE, useClosure = PETSC_TRUE
$     FVM:   Two points p and q are adjacent if q \in support(p+cone(p)), useCone = PETSC_TRUE,  useClosure = PETSC_FALSE
$     FVM++: Two points p and q are adjacent if q \in star(closure(p)),   useCone = PETSC_TRUE,  useClosure = PETSC_TRUE

.seealso: DMPlexSetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacencyUseClosure(DM dm, PetscBool *useClosure)
{
  PetscDS        prob;
  PetscInt       Nf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = DMGetDS(dm, &prob);CHKERRQ(ierr);
  ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
  /* Read from the default slot when there are no fields, else from field 0 (mirrors the setter) */
  if (!Nf) {
    ierr = PetscDSGetAdjacency(prob, PETSC_DEFAULT, NULL, useClosure);CHKERRQ(ierr);
  } else {
    ierr = PetscDSGetAdjacency(prob, 0, NULL, useClosure);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

  Input Parameters:
+ dm - The DM object
- useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexGetAdjacencyUseClosure(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  mesh->useAnchors = useAnchors;
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

  Input Parameter:
. dm - The DM object

  Output Parameter:
. useAnchors - Flag to use the closure.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

  Level: intermediate

.seealso: DMPlexSetAdjacencyUseAnchors(), DMPlexSetAdjacencyUseCone(), DMPlexGetAdjacencyUseCone(), DMPlexDistribute(), DMPlexPreallocateOperator(), DMPlexSetAnchors()
@*/
PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
{
  DM_Plex *mesh = (DM_Plex *) dm->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidIntPointer(useAnchors, 2);
  *useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

/* FVM-style adjacency: collect supports of p and of every point in cone(p), deduplicated into adj[].
   On input *adjSize is the capacity of adj[]; on output it is the number of adjacent points found. */
static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *cone = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetConeSize(dm, p, &coneSize);CHKERRQ(ierr);
  ierr = DMPlexGetCone(dm, p, &cone);CHKERRQ(ierr);
  /* c == 0 handles p itself; c > 0 walks cone(p) */
  for (c = 0; c <= coneSize; ++c) {
    const PetscInt  point   = !c ? p : cone[c-1];
    const PetscInt *support = NULL;
    PetscInt        supportSize, s, q;

    ierr = DMPlexGetSupportSize(dm, point, &supportSize);CHKERRQ(ierr);
    ierr = DMPlexGetSupport(dm, point, &support);CHKERRQ(ierr);
    for (s = 0; s < supportSize; ++s) {
      /* Linear membership search; if support[s] is absent, the short-circuited comma
         expression in the loop condition appends it to adj[] */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]),0); ++q) {
        if (support[s] == adj[q]) break;
      }
      /* NOTE(review): the append above happens before this bound check, so the slot at
         adj[maxAdjSize] may be written before the error fires — confirm callers allocate slack */
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* Dual of the Cone variant: collect cones of p and of every point in support(p), deduplicated into adj[]. */
static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
{
  const PetscInt *support = NULL;
  PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetSupportSize(dm, p, &supportSize);CHKERRQ(ierr);
  ierr = DMPlexGetSupport(dm, p, &support);CHKERRQ(ierr);
  /* s == 0 handles p itself; s > 0 walks support(p) */
  for (s = 0; s <= supportSize; ++s) {
    const PetscInt  point = !s ? p : support[s-1];
    const PetscInt *cone  = NULL;
    PetscInt        coneSize, c, q;

    ierr = DMPlexGetConeSize(dm, point, &coneSize);CHKERRQ(ierr);
    ierr = DMPlexGetCone(dm, point, &cone);CHKERRQ(ierr);
    for (c = 0; c < coneSize; ++c) {
      /* Append-if-absent idiom; see DMPlexGetAdjacency_Cone_Internal */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]),0); ++q) {
        if (cone[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
  }
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* FEM-style adjacency: closure(star(p)) when useClosure is FALSE on the outer walk,
   star(closure(p)) otherwise.  Points come interleaved with orientations from
   DMPlexGetTransitiveClosure, hence the stride-2 loops. */
static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
{
  PetscInt      *star = NULL;
  PetscInt       numAdj = 0, maxAdjSize = *adjSize, starSize, s;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  ierr = DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  for (s = 0; s < starSize*2; s += 2) {
    const PetscInt *closure = NULL;
    PetscInt        closureSize, c, q;

    /* Opposite direction on the inner walk gives the composed star/closure operation */
    ierr = DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
    for (c = 0; c < closureSize*2; c += 2) {
      /* Append-if-absent idiom; see DMPlexGetAdjacency_Cone_Internal */
      for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]),0); ++q) {
        if (closure[c] == adj[q]) break;
      }
      if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
    }
    ierr = DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt**) &closure);CHKERRQ(ierr);
  }
  ierr = DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);CHKERRQ(ierr);
  *adjSize = numAdj;
  PetscFunctionReturn(0);
}

/* Dispatch to the user callback or one of the three built-in adjacency walks, then optionally
   replace anchored (constrained) points by their anchors.  If *adj is NULL an array large enough
   for any point of this mesh is allocated (caller frees); if *adjSize is negative it is set to
   the allocated capacity before the walk. */
PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
{
  /* NOTE(review): static capacity cache survives across calls (and DMs) and is not thread-safe;
     it is only valid when *adjSize < 0 follows an allocation in the same call sequence — verify */
  static PetscInt asiz = 0;
  PetscInt        maxAnchors = 1;
  PetscInt        aStart = -1, aEnd = -1;
  PetscInt        maxAdjSize;
  PetscSection    aSec = NULL;
  IS              aIS = NULL;
  const PetscInt *anchors;
  DM_Plex        *mesh = (DM_Plex *)dm->data;
  PetscErrorCode  ierr;

  PetscFunctionBeginHot;
  if (useAnchors) {
    ierr = DMPlexGetAnchors(dm,&aSec,&aIS);CHKERRQ(ierr);
    if (aSec) {
      ierr = PetscSectionGetMaxDof(aSec,&maxAnchors);CHKERRQ(ierr);
      maxAnchors = PetscMax(1,maxAnchors);
      ierr = PetscSectionGetChart(aSec,&aStart,&aEnd);CHKERRQ(ierr);
      ierr = ISGetIndices(aIS,&anchors);CHKERRQ(ierr);
    }
  }
  if (!*adj) {
    PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

    /* Worst-case bound: geometric series over the depth of the DAG for cone/support fan-out,
       scaled by the maximum number of anchors, clamped to the chart size */
    ierr = DMPlexGetChart(dm, &pStart,&pEnd);CHKERRQ(ierr);
    ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
    ierr = DMPlexGetMaxSizes(dm, &maxC, &maxS);CHKERRQ(ierr);
    coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC,depth+1)-1)/(maxC-1)) : depth+1;
    supportSeries = (maxS > 1) ? ((PetscPowInt(maxS,depth+1)-1)/(maxS-1)) : depth+1;
    asiz  = PetscMax(PetscPowInt(maxS,depth)*coneSeries,PetscPowInt(maxC,depth)*supportSeries);
    asiz *= maxAnchors;
    asiz  = PetscMin(asiz,pEnd-pStart);
    ierr  = PetscMalloc1(asiz,adj);CHKERRQ(ierr);
  }
  if (*adjSize < 0) *adjSize = asiz;
  maxAdjSize = *adjSize;
  /* A user callback, when set, overrides all built-in adjacency determination */
  if (mesh->useradjacency) {
    ierr = mesh->useradjacency(dm, p, adjSize, *adj, mesh->useradjacencyctx);CHKERRQ(ierr);
  } else if (useTransitiveClosure) {
    ierr = DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);CHKERRQ(ierr);
  } else if (useCone) {
    ierr = DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  } else {
    ierr = DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);CHKERRQ(ierr);
  }
  if (useAnchors && aSec) {
    PetscInt  origSize = *adjSize;
    PetscInt  numAdj   = origSize;
    PetscInt  i = 0, j;
    PetscInt *orig = *adj;

    /* Remove each anchored point from the list and append its anchors (deduplicated) */
    while (i < origSize) {
      PetscInt p = orig[i];
      PetscInt aDof = 0;

      if (p >= aStart && p < aEnd) {
        ierr = PetscSectionGetDof(aSec,p,&aDof);CHKERRQ(ierr);
      }
      if (aDof) {
        PetscInt aOff;
        PetscInt s, q;

        /* Shift the remainder left to delete orig[i]; i is not advanced so the
           shifted-in element is examined next */
        for (j = i + 1; j < numAdj; j++) {
          orig[j - 1] = orig[j];
        }
        origSize--;
        numAdj--;
        ierr = PetscSectionGetOffset(aSec,p,&aOff);CHKERRQ(ierr);
        for (s = 0; s < aDof; ++s) {
          /* Append-if-absent idiom; see DMPlexGetAdjacency_Cone_Internal */
          for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff+s]),0); ++q) {
            if (anchors[aOff+s] == orig[q]) break;
          }
          if (numAdj > maxAdjSize) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Invalid mesh exceeded adjacency allocation (%D)", maxAdjSize);
        }
      } else {
        i++;
      }
    }
    *adjSize = numAdj;
    ierr = ISRestoreIndices(aIS,&anchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexGetAdjacency - Return all points adjacent to the given point

  Input Parameters:
+ dm - The DM object
. p - The point
. adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE
- adj - Either NULL so that the array is allocated, or an existing array with size adjSize

  Output Parameters:
+ adjSize - The number of adjacent points
- adj - The adjacent points

  Level: advanced

  Notes:
  The user must PetscFree the adj array if it was not passed in.
.seealso: DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure(), DMPlexDistribute(), DMCreateMatrix(), DMPlexPreallocateOperator()
@*/
PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
{
  PetscBool      useCone, useClosure, useAnchors;
  PetscErrorCode ierr;

  PetscFunctionBeginHot;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidPointer(adjSize,3);
  PetscValidPointer(adj,4);
  /* Gather the configured adjacency flags and defer to the internal dispatcher */
  ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
  ierr = DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
  DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

  Collective on DM

  Input Parameters:
+ dm - The DM
. sfPoint - The PetscSF which encodes point connectivity
. rootRankSection - Section giving, for each root point, the number of ranks sharing it
. rootRanks - The ranks sharing each root point, laid out by rootRankSection
. leafRankSection - Section giving, for each leaf point, the number of ranks sharing it
- leafRanks - The ranks sharing each leaf point, laid out by leafRankSection

  Output Parameters:
+ processRanks - A list of process neighbors, or NULL
- sfProcess - An SF encoding the two-sided process connectivity, or NULL

  Level: developer

.seealso: PetscSFCreate(), DMPlexCreateProcessSF()
@*/
PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
{
  const PetscSFNode *remotePoints;
  PetscInt          *localPointsNew;
  PetscSFNode       *remotePointsNew;
  const PetscInt    *nranks;
  PetscInt          *ranksNew;
  PetscBT            neighbors;
  PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
  PetscMPIInt        size, proc, rank;
  PetscErrorCode     ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(sfPoint, PETSCSF_CLASSID, 2);
  if (processRanks) {PetscValidPointer(processRanks, 3);}
  if (sfProcess) {PetscValidPointer(sfProcess, 4);}
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);CHKERRQ(ierr);
  /* One bit per rank marks the processes we touch in any direction */
  ierr = PetscBTCreate(size, &neighbors);CHKERRQ(ierr);
  ierr = PetscBTMemzero(size, neighbors);CHKERRQ(ierr);
  /* Compute root-to-leaf process connectivity */
  ierr = PetscSectionGetChart(rootRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(rootRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(rootRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(rootRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(rootRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-neighbor process connectivity */
  ierr = PetscSectionGetChart(leafRankSection, &pStart, &pEnd);CHKERRQ(ierr);
  ierr = ISGetIndices(leafRanks, &nranks);CHKERRQ(ierr);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt ndof, noff, n;

    ierr = PetscSectionGetDof(leafRankSection, p, &ndof);CHKERRQ(ierr);
    ierr = PetscSectionGetOffset(leafRankSection, p, &noff);CHKERRQ(ierr);
    for (n = 0; n < ndof; ++n) {ierr = PetscBTSet(neighbors, nranks[noff+n]);CHKERRQ(ierr);}
  }
  ierr = ISRestoreIndices(leafRanks, &nranks);CHKERRQ(ierr);
  /* Compute leaf-to-root process connectivity */
  for (l = 0; l < numLeaves; ++l) {PetscBTSet(neighbors, remotePoints[l].rank);}
  /* Calculate edges; a process is not its own neighbor */
  PetscBTClear(neighbors, rank);
  for(proc = 0, numNeighbors = 0; proc < size; ++proc) {if (PetscBTLookup(neighbors, proc)) ++numNeighbors;}
  ierr = PetscMalloc1(numNeighbors, &ranksNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &localPointsNew);CHKERRQ(ierr);
  ierr = PetscMalloc1(numNeighbors, &remotePointsNew);CHKERRQ(ierr);
  /* Each neighbor becomes one SF edge: leaf index = neighbor rank, remote = (rank, neighbor) */
  for(proc = 0, n = 0; proc < size; ++proc) {
    if (PetscBTLookup(neighbors, proc)) {
      ranksNew[n]              = proc;
      localPointsNew[n]        = proc;
      remotePointsNew[n].index = rank;
      remotePointsNew[n].rank  = proc;
      ++n;
    }
  }
  ierr = PetscBTDestroy(&neighbors);CHKERRQ(ierr);
  if (processRanks) {ierr = ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);CHKERRQ(ierr);}
  else              {ierr = PetscFree(ranksNew);CHKERRQ(ierr);}
  if (sfProcess) {
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) *sfProcess, "Two-Sided Process SF");CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions(*sfProcess);CHKERRQ(ierr);
    ierr = PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*@
  DMPlexDistributeOwnership - Compute owner information for shared points. This basically gets two-sided for an SF.

  Collective on DM

  Input Parameter:
. dm - The DM

  Output Parameters:
+ rootSection - The number of leaves for a given root point
. rootrank - The rank of each edge into the root point
.
leafSection - The number of processes sharing a given leaf point 548 - leafrank - The rank of each process sharing a leaf point 549 550 Level: developer 551 552 .seealso: DMPlexCreateOverlap() 553 @*/ 554 PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank) 555 { 556 MPI_Comm comm; 557 PetscSF sfPoint; 558 const PetscInt *rootdegree; 559 PetscInt *myrank, *remoterank; 560 PetscInt pStart, pEnd, p, nedges; 561 PetscMPIInt rank; 562 PetscErrorCode ierr; 563 564 PetscFunctionBegin; 565 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 566 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 567 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 568 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 569 /* Compute number of leaves for each root */ 570 ierr = PetscObjectSetName((PetscObject) rootSection, "Root Section");CHKERRQ(ierr); 571 ierr = PetscSectionSetChart(rootSection, pStart, pEnd);CHKERRQ(ierr); 572 ierr = PetscSFComputeDegreeBegin(sfPoint, &rootdegree);CHKERRQ(ierr); 573 ierr = PetscSFComputeDegreeEnd(sfPoint, &rootdegree);CHKERRQ(ierr); 574 for (p = pStart; p < pEnd; ++p) {ierr = PetscSectionSetDof(rootSection, p, rootdegree[p-pStart]);CHKERRQ(ierr);} 575 ierr = PetscSectionSetUp(rootSection);CHKERRQ(ierr); 576 /* Gather rank of each leaf to root */ 577 ierr = PetscSectionGetStorageSize(rootSection, &nedges);CHKERRQ(ierr); 578 ierr = PetscMalloc1(pEnd-pStart, &myrank);CHKERRQ(ierr); 579 ierr = PetscMalloc1(nedges, &remoterank);CHKERRQ(ierr); 580 for (p = 0; p < pEnd-pStart; ++p) myrank[p] = rank; 581 ierr = PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 582 ierr = PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);CHKERRQ(ierr); 583 ierr = PetscFree(myrank);CHKERRQ(ierr); 584 ierr = ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);CHKERRQ(ierr); 585 /* Distribute remote ranks to leaves */ 586 ierr = 
PetscObjectSetName((PetscObject) leafSection, "Leaf Section");CHKERRQ(ierr); 587 ierr = DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);CHKERRQ(ierr); 588 PetscFunctionReturn(0); 589 } 590 591 /*@C 592 DMPlexCreateOverlap - Compute owner information for shared points. This basically gets two-sided for an SF. 593 594 Collective on DM 595 596 Input Parameters: 597 + dm - The DM 598 . levels - Number of overlap levels 599 . rootSection - The number of leaves for a given root point 600 . rootrank - The rank of each edge into the root point 601 . leafSection - The number of processes sharing a given leaf point 602 - leafrank - The rank of each process sharing a leaf point 603 604 Output Parameters: 605 + ovLabel - DMLabel containing remote overlap contributions as point/rank pairings 606 607 Level: developer 608 609 .seealso: DMPlexDistributeOwnership(), DMPlexDistribute() 610 @*/ 611 PetscErrorCode DMPlexCreateOverlap(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel) 612 { 613 MPI_Comm comm; 614 DMLabel ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */ 615 PetscSF sfPoint, sfProc; 616 const PetscSFNode *remote; 617 const PetscInt *local; 618 const PetscInt *nrank, *rrank; 619 PetscInt *adj = NULL; 620 PetscInt pStart, pEnd, p, sStart, sEnd, nleaves, l; 621 PetscMPIInt rank, size; 622 PetscBool useCone, useClosure, flg; 623 PetscErrorCode ierr; 624 625 PetscFunctionBegin; 626 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 627 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 628 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 629 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 630 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 631 ierr = PetscSectionGetChart(leafSection, &sStart, &sEnd);CHKERRQ(ierr); 632 ierr = PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, 
&remote);CHKERRQ(ierr); 633 ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);CHKERRQ(ierr); 634 /* Handle leaves: shared with the root point */ 635 for (l = 0; l < nleaves; ++l) { 636 PetscInt adjSize = PETSC_DETERMINE, a; 637 638 ierr = DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);CHKERRQ(ierr); 639 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);CHKERRQ(ierr);} 640 } 641 ierr = ISGetIndices(rootrank, &rrank);CHKERRQ(ierr); 642 ierr = ISGetIndices(leafrank, &nrank);CHKERRQ(ierr); 643 /* Handle roots */ 644 for (p = pStart; p < pEnd; ++p) { 645 PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a; 646 647 if ((p >= sStart) && (p < sEnd)) { 648 /* Some leaves share a root with other leaves on different processes */ 649 ierr = PetscSectionGetDof(leafSection, p, &neighbors);CHKERRQ(ierr); 650 if (neighbors) { 651 ierr = PetscSectionGetOffset(leafSection, p, &noff);CHKERRQ(ierr); 652 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 653 for (n = 0; n < neighbors; ++n) { 654 const PetscInt remoteRank = nrank[noff+n]; 655 656 if (remoteRank == rank) continue; 657 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 658 } 659 } 660 } 661 /* Roots are shared with leaves */ 662 ierr = PetscSectionGetDof(rootSection, p, &neighbors);CHKERRQ(ierr); 663 if (!neighbors) continue; 664 ierr = PetscSectionGetOffset(rootSection, p, &noff);CHKERRQ(ierr); 665 ierr = DMPlexGetAdjacency(dm, p, &adjSize, &adj);CHKERRQ(ierr); 666 for (n = 0; n < neighbors; ++n) { 667 const PetscInt remoteRank = rrank[noff+n]; 668 669 if (remoteRank == rank) continue; 670 for (a = 0; a < adjSize; ++a) {ierr = DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);CHKERRQ(ierr);} 671 } 672 } 673 ierr = PetscFree(adj);CHKERRQ(ierr); 674 ierr = ISRestoreIndices(rootrank, &rrank);CHKERRQ(ierr); 675 ierr = ISRestoreIndices(leafrank, &nrank);CHKERRQ(ierr); 676 /* 
Add additional overlap levels */ 677 for (l = 1; l < levels; l++) { 678 /* Propagate point donations over SF to capture remote connections */ 679 ierr = DMPlexPartitionLabelPropagate(dm, ovAdjByRank);CHKERRQ(ierr); 680 /* Add next level of point donations to the label */ 681 ierr = DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);CHKERRQ(ierr); 682 } 683 /* We require the closure in the overlap */ 684 ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr); 685 ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr); 686 if (useCone || !useClosure) { 687 ierr = DMPlexPartitionLabelClosure(dm, ovAdjByRank);CHKERRQ(ierr); 688 } 689 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-overlap_view", &flg);CHKERRQ(ierr); 690 if (flg) { 691 ierr = DMLabelView(ovAdjByRank, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 692 } 693 /* Make global process SF and invert sender to receiver label */ 694 { 695 /* Build a global process SF */ 696 PetscSFNode *remoteProc; 697 ierr = PetscMalloc1(size, &remoteProc);CHKERRQ(ierr); 698 for (p = 0; p < size; ++p) { 699 remoteProc[p].rank = p; 700 remoteProc[p].index = rank; 701 } 702 ierr = PetscSFCreate(comm, &sfProc);CHKERRQ(ierr); 703 ierr = PetscObjectSetName((PetscObject) sfProc, "Process SF");CHKERRQ(ierr); 704 ierr = PetscSFSetGraph(sfProc, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr); 705 } 706 ierr = DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);CHKERRQ(ierr); 707 ierr = DMPlexPartitionLabelInvert(dm, ovAdjByRank, sfProc, *ovLabel);CHKERRQ(ierr); 708 /* Add owned points, except for shared local points */ 709 for (p = pStart; p < pEnd; ++p) {ierr = DMLabelSetValue(*ovLabel, p, rank);CHKERRQ(ierr);} 710 for (l = 0; l < nleaves; ++l) { 711 ierr = DMLabelClearValue(*ovLabel, local[l], rank);CHKERRQ(ierr); 712 ierr = DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);CHKERRQ(ierr); 713 } 714 /* Clean up */ 715 ierr = 
DMLabelDestroy(&ovAdjByRank);CHKERRQ(ierr); 716 ierr = PetscSFDestroy(&sfProc);CHKERRQ(ierr); 717 PetscFunctionReturn(0); 718 } 719 720 /*@C 721 DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution to make the overlap described by the input SF 722 723 Collective on DM 724 725 Input Parameters: 726 + dm - The DM 727 - overlapSF - The SF mapping ghost points in overlap to owner points on other processes 728 729 Output Parameters: 730 + migrationSF - An SF that maps original points in old locations to points in new locations 731 732 Level: developer 733 734 .seealso: DMPlexCreateOverlap(), DMPlexDistribute() 735 @*/ 736 PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF) 737 { 738 MPI_Comm comm; 739 PetscMPIInt rank, size; 740 PetscInt d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints; 741 PetscInt *pointDepths, *remoteDepths, *ilocal; 742 PetscInt *depthRecv, *depthShift, *depthIdx; 743 PetscSFNode *iremote; 744 PetscSF pointSF; 745 const PetscInt *sharedLocal; 746 const PetscSFNode *overlapRemote, *sharedRemote; 747 PetscErrorCode ierr; 748 749 PetscFunctionBegin; 750 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 751 ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr); 752 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 753 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 754 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 755 756 /* Before building the migration SF we need to know the new stratum offsets */ 757 ierr = PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);CHKERRQ(ierr); 758 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 759 for (d=0; d<dim+1; d++) { 760 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 761 for (p=pStart; p<pEnd; p++) pointDepths[p] = d; 762 } 763 for (p=0; p<nleaves; p++) remoteDepths[p] = -1; 764 ierr = PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, 
remoteDepths);CHKERRQ(ierr); 765 ierr = PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 766 767 /* Count recevied points in each stratum and compute the internal strata shift */ 768 ierr = PetscMalloc3(dim+1, &depthRecv, dim+1, &depthShift, dim+1, &depthIdx);CHKERRQ(ierr); 769 for (d=0; d<dim+1; d++) depthRecv[d]=0; 770 for (p=0; p<nleaves; p++) depthRecv[remoteDepths[p]]++; 771 depthShift[dim] = 0; 772 for (d=0; d<dim; d++) depthShift[d] = depthRecv[dim]; 773 for (d=1; d<dim; d++) depthShift[d] += depthRecv[0]; 774 for (d=dim-2; d>0; d--) depthShift[d] += depthRecv[d+1]; 775 for (d=0; d<dim+1; d++) { 776 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 777 depthIdx[d] = pStart + depthShift[d]; 778 } 779 780 /* Form the overlap SF build an SF that describes the full overlap migration SF */ 781 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 782 newLeaves = pEnd - pStart + nleaves; 783 ierr = PetscMalloc1(newLeaves, &ilocal);CHKERRQ(ierr); 784 ierr = PetscMalloc1(newLeaves, &iremote);CHKERRQ(ierr); 785 /* First map local points to themselves */ 786 for (d=0; d<dim+1; d++) { 787 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 788 for (p=pStart; p<pEnd; p++) { 789 point = p + depthShift[d]; 790 ilocal[point] = point; 791 iremote[point].index = p; 792 iremote[point].rank = rank; 793 depthIdx[d]++; 794 } 795 } 796 797 /* Add in the remote roots for currently shared points */ 798 ierr = DMGetPointSF(dm, &pointSF);CHKERRQ(ierr); 799 ierr = PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);CHKERRQ(ierr); 800 for (d=0; d<dim+1; d++) { 801 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 802 for (p=0; p<numSharedPoints; p++) { 803 if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) { 804 point = sharedLocal[p] + depthShift[d]; 805 iremote[point].index = sharedRemote[p].index; 806 iremote[point].rank = sharedRemote[p].rank; 807 } 808 } 809 } 810 811 /* 
Now add the incoming overlap points */ 812 for (p=0; p<nleaves; p++) { 813 point = depthIdx[remoteDepths[p]]; 814 ilocal[point] = point; 815 iremote[point].index = overlapRemote[p].index; 816 iremote[point].rank = overlapRemote[p].rank; 817 depthIdx[remoteDepths[p]]++; 818 } 819 ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr); 820 821 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 822 ierr = PetscObjectSetName((PetscObject) *migrationSF, "Overlap Migration SF");CHKERRQ(ierr); 823 ierr = PetscSFSetFromOptions(*migrationSF);CHKERRQ(ierr); 824 ierr = DMPlexGetChart(dm, &pStart, &pEnd);CHKERRQ(ierr); 825 ierr = PetscSFSetGraph(*migrationSF, pEnd-pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);CHKERRQ(ierr); 826 827 ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr); 828 PetscFunctionReturn(0); 829 } 830 831 /*@ 832 DMPlexStratifyMigrationSF - Rearrange the leaves of a migration sf for stratification. 833 834 Input Parameter: 835 + dm - The DM 836 - sf - A star forest with non-ordered leaves, usually defining a DM point migration 837 838 Output Parameter: 839 . 
migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified 840 841 Level: developer 842 843 .seealso: DMPlexPartitionLabelCreateSF(), DMPlexDistribute(), DMPlexDistributeOverlap() 844 @*/ 845 PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF) 846 { 847 MPI_Comm comm; 848 PetscMPIInt rank, size; 849 PetscInt d, ldepth, depth, p, pStart, pEnd, nroots, nleaves; 850 PetscInt *pointDepths, *remoteDepths, *ilocal; 851 PetscInt *depthRecv, *depthShift, *depthIdx; 852 PetscInt hybEnd[4]; 853 const PetscSFNode *iremote; 854 PetscErrorCode ierr; 855 856 PetscFunctionBegin; 857 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 858 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 859 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 860 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 861 ierr = DMPlexGetDepth(dm, &ldepth);CHKERRQ(ierr); 862 ierr = MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr); 863 if ((ldepth >= 0) && (depth != ldepth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", ldepth, depth); 864 865 /* Before building the migration SF we need to know the new stratum offsets */ 866 ierr = PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);CHKERRQ(ierr); 867 ierr = PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);CHKERRQ(ierr); 868 ierr = DMPlexGetHybridBounds(dm,&hybEnd[depth],&hybEnd[depth-1],&hybEnd[1],&hybEnd[0]);CHKERRQ(ierr); 869 for (d = 0; d < depth+1; ++d) { 870 ierr = DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);CHKERRQ(ierr); 871 for (p = pStart; p < pEnd; ++p) { 872 if (hybEnd[d] >= 0 && p >= hybEnd[d]) { /* put in a separate value for hybrid points */ 873 pointDepths[p] = 2 * d; 874 } else { 875 pointDepths[p] = 2 * d + 1; 876 } 877 } 878 } 879 for (p = 0; p < nleaves; ++p) remoteDepths[p] = -1; 880 ierr = PetscSFBcastBegin(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 881 ierr = 
PetscSFBcastEnd(sf, MPIU_INT, pointDepths, remoteDepths);CHKERRQ(ierr); 882 /* Count recevied points in each stratum and compute the internal strata shift */ 883 ierr = PetscMalloc3(2*(depth+1), &depthRecv, 2*(depth+1), &depthShift, 2*(depth+1), &depthIdx);CHKERRQ(ierr); 884 for (d = 0; d < 2*(depth+1); ++d) depthRecv[d] = 0; 885 for (p = 0; p < nleaves; ++p) depthRecv[remoteDepths[p]]++; 886 depthShift[2*depth+1] = 0; 887 for (d = 0; d < 2*depth+1; ++d) depthShift[d] = depthRecv[2 * depth + 1]; 888 for (d = 0; d < 2*depth; ++d) depthShift[d] += depthRecv[2 * depth]; 889 depthShift[0] += depthRecv[1]; 890 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[1]; 891 for (d = 2; d < 2*depth; ++d) depthShift[d] += depthRecv[0]; 892 for (d = 2 * depth-1; d > 2; --d) { 893 PetscInt e; 894 895 for (e = d -1; e > 1; --e) depthShift[e] += depthRecv[d]; 896 } 897 for (d = 0; d < 2*(depth+1); ++d) {depthIdx[d] = 0;} 898 /* Derive a new local permutation based on stratified indices */ 899 ierr = PetscMalloc1(nleaves, &ilocal);CHKERRQ(ierr); 900 for (p = 0; p < nleaves; ++p) { 901 const PetscInt dep = remoteDepths[p]; 902 903 ilocal[p] = depthShift[dep] + depthIdx[dep]; 904 depthIdx[dep]++; 905 } 906 ierr = PetscSFCreate(comm, migrationSF);CHKERRQ(ierr); 907 ierr = PetscObjectSetName((PetscObject) *migrationSF, "Migration SF");CHKERRQ(ierr); 908 ierr = PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_COPY_VALUES);CHKERRQ(ierr); 909 ierr = PetscFree2(pointDepths,remoteDepths);CHKERRQ(ierr); 910 ierr = PetscFree3(depthRecv, depthShift, depthIdx);CHKERRQ(ierr); 911 PetscFunctionReturn(0); 912 } 913 914 /*@ 915 DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 916 917 Collective on DM 918 919 Input Parameters: 920 + dm - The DMPlex object 921 . pointSF - The PetscSF describing the communication pattern 922 . 
originalSection - The PetscSection for existing data layout 923 - originalVec - The existing data 924 925 Output Parameters: 926 + newSection - The PetscSF describing the new data layout 927 - newVec - The new data 928 929 Level: developer 930 931 .seealso: DMPlexDistribute(), DMPlexDistributeFieldIS(), DMPlexDistributeData() 932 @*/ 933 PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec) 934 { 935 PetscSF fieldSF; 936 PetscInt *remoteOffsets, fieldSize; 937 PetscScalar *originalValues, *newValues; 938 PetscErrorCode ierr; 939 940 PetscFunctionBegin; 941 ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 942 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 943 944 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 945 ierr = VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);CHKERRQ(ierr); 946 ierr = VecSetType(newVec,dm->vectype);CHKERRQ(ierr); 947 948 ierr = VecGetArray(originalVec, &originalValues);CHKERRQ(ierr); 949 ierr = VecGetArray(newVec, &newValues);CHKERRQ(ierr); 950 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 951 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 952 ierr = PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr); 953 ierr = PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues);CHKERRQ(ierr); 954 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 955 ierr = VecRestoreArray(newVec, &newValues);CHKERRQ(ierr); 956 ierr = VecRestoreArray(originalVec, &originalValues);CHKERRQ(ierr); 957 ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 958 PetscFunctionReturn(0); 959 } 960 961 /*@ 962 DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution 963 964 Collective on DM 965 966 Input Parameters: 967 
+ dm - The DMPlex object 968 . pointSF - The PetscSF describing the communication pattern 969 . originalSection - The PetscSection for existing data layout 970 - originalIS - The existing data 971 972 Output Parameters: 973 + newSection - The PetscSF describing the new data layout 974 - newIS - The new data 975 976 Level: developer 977 978 .seealso: DMPlexDistribute(), DMPlexDistributeField(), DMPlexDistributeData() 979 @*/ 980 PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS) 981 { 982 PetscSF fieldSF; 983 PetscInt *newValues, *remoteOffsets, fieldSize; 984 const PetscInt *originalValues; 985 PetscErrorCode ierr; 986 987 PetscFunctionBegin; 988 ierr = PetscLogEventBegin(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 989 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 990 991 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 992 ierr = PetscMalloc1(fieldSize, &newValues);CHKERRQ(ierr); 993 994 ierr = ISGetIndices(originalIS, &originalValues);CHKERRQ(ierr); 995 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 996 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 997 ierr = PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr); 998 ierr = PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *) originalValues, newValues);CHKERRQ(ierr); 999 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 1000 ierr = ISRestoreIndices(originalIS, &originalValues);CHKERRQ(ierr); 1001 ierr = ISCreateGeneral(PetscObjectComm((PetscObject) pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);CHKERRQ(ierr); 1002 ierr = PetscLogEventEnd(DMPLEX_DistributeField,dm,0,0,0);CHKERRQ(ierr); 1003 PetscFunctionReturn(0); 1004 } 1005 1006 /*@ 1007 DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh 
distribution 1008 1009 Collective on DM 1010 1011 Input Parameters: 1012 + dm - The DMPlex object 1013 . pointSF - The PetscSF describing the communication pattern 1014 . originalSection - The PetscSection for existing data layout 1015 . datatype - The type of data 1016 - originalData - The existing data 1017 1018 Output Parameters: 1019 + newSection - The PetscSection describing the new data layout 1020 - newData - The new data 1021 1022 Level: developer 1023 1024 .seealso: DMPlexDistribute(), DMPlexDistributeField() 1025 @*/ 1026 PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData) 1027 { 1028 PetscSF fieldSF; 1029 PetscInt *remoteOffsets, fieldSize; 1030 PetscMPIInt dataSize; 1031 PetscErrorCode ierr; 1032 1033 PetscFunctionBegin; 1034 ierr = PetscLogEventBegin(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 1035 ierr = PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);CHKERRQ(ierr); 1036 1037 ierr = PetscSectionGetStorageSize(newSection, &fieldSize);CHKERRQ(ierr); 1038 ierr = MPI_Type_size(datatype, &dataSize);CHKERRQ(ierr); 1039 ierr = PetscMalloc(fieldSize * dataSize, newData);CHKERRQ(ierr); 1040 1041 ierr = PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);CHKERRQ(ierr); 1042 ierr = PetscFree(remoteOffsets);CHKERRQ(ierr); 1043 ierr = PetscSFBcastBegin(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 1044 ierr = PetscSFBcastEnd(fieldSF, datatype, originalData, *newData);CHKERRQ(ierr); 1045 ierr = PetscSFDestroy(&fieldSF);CHKERRQ(ierr); 1046 ierr = PetscLogEventEnd(DMPLEX_DistributeData,dm,0,0,0);CHKERRQ(ierr); 1047 PetscFunctionReturn(0); 1048 } 1049 1050 static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel) 1051 { 1052 DM_Plex *pmesh = (DM_Plex*) (dmParallel)->data; 
  /* Migrate the cone section and cone points over migrationSF, renumber received cones
     to local points, then symmetrize and stratify the new mesh. */
  MPI_Comm       comm;
  PetscSF        coneSF;
  PetscSection   originalConeSection, newConeSection;
  PetscInt      *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);

  ierr = PetscLogEventBegin(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Distribute cone section */
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dm, &originalConeSection);CHKERRQ(ierr);
  ierr = DMPlexGetConeSection(dmParallel, &newConeSection);CHKERRQ(ierr);
  ierr = PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);CHKERRQ(ierr);
  ierr = DMSetUp(dmParallel);CHKERRQ(ierr);
  {
    /* Track the largest cone size on the receiving side */
    PetscInt pStart, pEnd, p;

    ierr = PetscSectionGetChart(newConeSection, &pStart, &pEnd);CHKERRQ(ierr);
    for (p = pStart; p < pEnd; ++p) {
      PetscInt coneSize;
      ierr = PetscSectionGetDof(newConeSection, p, &coneSize);CHKERRQ(ierr);
      pmesh->maxConeSize = PetscMax(pmesh->maxConeSize, coneSize);
    }
  }
  /* Communicate and renumber cones */
  ierr = PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);CHKERRQ(ierr);
  ierr = PetscFree(remoteOffsets);CHKERRQ(ierr);
  ierr = DMPlexGetCones(dm, &cones);CHKERRQ(ierr);
  if (original) {
    /* With a local-to-global mapping, send cone points in global numbering */
    PetscInt numCones;

    ierr = PetscSectionGetStorageSize(originalConeSection,&numCones);CHKERRQ(ierr);
    ierr = PetscMalloc1(numCones,&globCones);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);CHKERRQ(ierr);
  } else {
    globCones = cones;
  }
  ierr = DMPlexGetCones(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones);CHKERRQ(ierr);
  if (original) {
    ierr = PetscFree(globCones);CHKERRQ(ierr);
  }
  ierr = PetscSectionGetStorageSize(newConeSection, &newConesSize);CHKERRQ(ierr);
  /* Map received global cone points back to local numbering; unmapped points become negative */
  ierr = ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
  {
    /* Any negative entry means a cone point was not covered by the overlap SF */
    PetscInt  p;
    PetscBool valid = PETSC_TRUE;
    for (p = 0; p < newConesSize; ++p) {
      if (newCones[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] Point %D not in overlap SF\n", PetscGlobalRank,p);CHKERRQ(ierr);}
    }
    if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
  }
#endif
  ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-cones_view", &flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PetscPrintf(comm, "Serial Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscPrintf(comm, "Parallel Cone Section:\n");CHKERRQ(ierr);
    ierr = PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
    ierr = PetscSFView(coneSF, NULL);CHKERRQ(ierr);
  }
  /* Orientations are communicated unchanged: no renumbering is applied to them */
  ierr = DMPlexGetConeOrientations(dm, &cones);CHKERRQ(ierr);
  ierr = DMPlexGetConeOrientations(dmParallel, &newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&coneSF);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(DMPLEX_DistributeCones,dm,0,0,0);CHKERRQ(ierr);
  /* Create supports and stratify DMPlex */
  {
    PetscInt pStart, pEnd;

    ierr = PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);CHKERRQ(ierr);
  }
  ierr = DMPlexSymmetrize(dmParallel);CHKERRQ(ierr);
  ierr = DMPlexStratify(dmParallel);CHKERRQ(ierr);
  {
    /* Copy the adjacency configuration onto the new DM */
    PetscBool useCone, useClosure, useAnchors;

    ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr);
    ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseCone(dmParallel, useCone);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseClosure(dmParallel, useClosure);CHKERRQ(ierr);
    ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* Distribute local coordinates and the periodicity description to the parallel DM */
static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
{
  MPI_Comm              comm;
  PetscSection          originalCoordSection, newCoordSection;
  Vec                   originalCoordinates, newCoordinates;
  PetscInt              bs;
  PetscBool             isper;
  const char           *name;
  const PetscReal      *maxCell, *L;
  const DMBoundaryType *bd;
  PetscErrorCode        ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dm, &originalCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinateSection(dmParallel, &newCoordSection);CHKERRQ(ierr);
  ierr = DMGetCoordinatesLocal(dm, &originalCoordinates);CHKERRQ(ierr);
  if (originalCoordinates) {
    /* Local coordinate vectors live on PETSC_COMM_SELF (one per rank) */
    ierr = VecCreate(PETSC_COMM_SELF, &newCoordinates);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) originalCoordinates, &name);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject) newCoordinates, name);CHKERRQ(ierr);

    ierr = DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);CHKERRQ(ierr);
    ierr = DMSetCoordinatesLocal(dmParallel, newCoordinates);CHKERRQ(ierr);
    ierr =
VecGetBlockSize(originalCoordinates, &bs);CHKERRQ(ierr);
    ierr = VecSetBlockSize(newCoordinates, bs);CHKERRQ(ierr);
    ierr = VecDestroy(&newCoordinates);CHKERRQ(ierr);
  }
  /* Carry the periodicity description over to the new DM unchanged */
  ierr = DMGetPeriodicity(dm, &isper, &maxCell, &L, &bd);CHKERRQ(ierr);
  ierr = DMSetPeriodicity(dmParallel, isper, maxCell, L, bd);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Here we are assuming that process 0 always has everything */
static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
{
  DM_Plex         *mesh = (DM_Plex*) dm->data;
  MPI_Comm         comm;
  DMLabel          depthLabel;
  PetscMPIInt      rank;
  PetscInt         depth, d, numLabels, numLocalLabels, l;
  PetscBool        hasLabels = PETSC_FALSE, lsendDepth, sendDepth;
  PetscObjectState depthState = -1;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = PetscLogEventBegin(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);

  /* If the user has changed the depth label, communicate it instead */
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetDepthLabel(dm, &depthLabel);CHKERRQ(ierr);
  if (depthLabel) {ierr = PetscObjectStateGet((PetscObject) depthLabel, &depthState);CHKERRQ(ierr);}
  lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
  ierr = MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);CHKERRQ(ierr);
  if (sendDepth) {
    /* Drop the automatically created depth label so the communicated one can replace it */
    ierr = DMRemoveLabel(dmParallel, "depth", &depthLabel);CHKERRQ(ierr);
    ierr = DMLabelDestroy(&depthLabel);CHKERRQ(ierr);
  }
  /* Everyone must have either the same number of labels, or none */
  ierr = DMGetNumLabels(dm, &numLocalLabels);CHKERRQ(ierr);
  numLabels = numLocalLabels;
  ierr = MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);CHKERRQ(ierr);
  if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
  for (l = numLabels-1; l >= 0; --l) {
    DMLabel     label = NULL, labelNew = NULL;
    PetscBool   isDepth, lisOutput = PETSC_TRUE, isOutput;
    const char *name = NULL;

    if (hasLabels) {
      ierr = DMGetLabelByNum(dm, l, &label);CHKERRQ(ierr);
      /* Skip "depth" because it is recreated */
      ierr = PetscObjectGetName((PetscObject) label, &name);CHKERRQ(ierr);
      ierr = PetscStrcmp(name, "depth", &isDepth);CHKERRQ(ierr);
    }
    /* Ranks without labels learn from rank 0 whether this label is "depth" */
    ierr = MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);CHKERRQ(ierr);
    if (isDepth && !sendDepth) continue;
    ierr = DMLabelDistribute(label, migrationSF, &labelNew);CHKERRQ(ierr);
    if (isDepth) {
      /* Put in any missing strata which can occur if users are managing the depth label themselves */
      PetscInt gdepth;

      ierr = MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);CHKERRQ(ierr);
      if ((depth >= 0) && (gdepth != depth)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Inconsistent Plex depth %d != %d", depth, gdepth);
      for (d = 0; d <= gdepth; ++d) {
        PetscBool has;

        ierr = DMLabelHasStratum(labelNew, d, &has);CHKERRQ(ierr);
        if (!has) {ierr = DMLabelAddStratum(labelNew, d);CHKERRQ(ierr);}
      }
    }
    ierr = DMAddLabel(dmParallel, labelNew);CHKERRQ(ierr);
    /* Put the output flag in the new label */
    if (hasLabels) {ierr = DMGetLabelOutput(dm, name, &lisOutput);CHKERRQ(ierr);}
    ierr = MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);CHKERRQ(ierr);
    ierr = PetscObjectGetName((PetscObject) labelNew, &name);CHKERRQ(ierr);
    ierr = DMSetLabelOutput(dmParallel, name, isOutput);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(DMPLEX_DistributeLabels,dm,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Transfer the hybrid point bounds to the parallel DM by flagging hybrid points and
   broadcasting the flags over the migration SF */
static PetscErrorCode DMPlexDistributeSetupHybrid(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  PetscBool      *isHybrid, *isHybridParallel;
  PetscInt        dim, depth, d;
  PetscInt        pStart, pEnd, pStartP, pEndP;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr);
  ierr = DMPlexGetDepth(dm, &depth);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dm,&pStart,&pEnd);CHKERRQ(ierr);
  ierr = DMPlexGetChart(dmParallel,&pStartP,&pEndP);CHKERRQ(ierr);
  ierr = PetscCalloc2(pEnd-pStart,&isHybrid,pEndP-pStartP,&isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= depth; d++) {
    /* For a depth-1 (uninterpolated) mesh, stratum 1 holds cells whose bound is stored at index dim */
    PetscInt hybridMax = (depth == 1 && d == 1) ?
mesh->hybridPointMax[dim] : mesh->hybridPointMax[d];

    if (hybridMax >= 0) {
      /* Points at or beyond the hybrid bound are flagged as hybrid */
      PetscInt sStart, sEnd, p;

      ierr = DMPlexGetDepthStratum(dm,d,&sStart,&sEnd);CHKERRQ(ierr);
      for (p = hybridMax; p < sEnd; p++) isHybrid[p-pStart] = PETSC_TRUE;
    }
  }
  ierr = PetscSFBcastBegin(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(migrationSF,MPIU_BOOL,isHybrid,isHybridParallel);CHKERRQ(ierr);
  for (d = 0; d <= dim; d++) pmesh->hybridPointMax[d] = -1;
  for (d = 0; d <= depth; d++) {
    PetscInt sStart, sEnd, p, dd;

    ierr = DMPlexGetDepthStratum(dmParallel,d,&sStart,&sEnd);CHKERRQ(ierr);
    dd = (depth == 1 && d == 1) ? dim : d;
    /* The first received hybrid point in each stratum becomes the new bound */
    for (p = sStart; p < sEnd; p++) {
      if (isHybridParallel[p-pStartP]) {
        pmesh->hybridPointMax[dd] = p;
        break;
      }
    }
  }
  ierr = PetscFree2(isHybrid,isHybridParallel);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Distribute the reference tree and parent/child constraint data to the parallel DM */
static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
{
  DM_Plex        *mesh  = (DM_Plex*) dm->data;
  DM_Plex        *pmesh = (DM_Plex*) (dmParallel)->data;
  MPI_Comm        comm;
  DM              refTree;
  PetscSection    origParentSection, newParentSection;
  PetscInt       *origParents, *origChildIDs;
  PetscBool       flg;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 5);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);

  /* Set up tree */
  ierr = DMPlexGetReferenceTree(dm,&refTree);CHKERRQ(ierr);
  ierr = DMPlexSetReferenceTree(dmParallel,refTree);CHKERRQ(ierr);
  ierr = DMPlexGetTree(dm,&origParentSection,&origParents,&origChildIDs,NULL,NULL);CHKERRQ(ierr);
  if (origParentSection) {
    PetscInt  pStart, pEnd;
    PetscInt *newParents, *newChildIDs, *globParents;
    PetscInt *remoteOffsetsParents, newParentSize;
    PetscSF   parentSF;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
    ierr = PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel),&newParentSection);CHKERRQ(ierr);
    ierr = PetscSectionSetChart(newParentSection,pStart,pEnd);CHKERRQ(ierr);
    ierr = PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);CHKERRQ(ierr);
    ierr = PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);CHKERRQ(ierr);
    ierr = PetscFree(remoteOffsetsParents);CHKERRQ(ierr);
    ierr = PetscSectionGetStorageSize(newParentSection,&newParentSize);CHKERRQ(ierr);
    ierr = PetscMalloc2(newParentSize,&newParents,newParentSize,&newChildIDs);CHKERRQ(ierr);
    if (original) {
      /* Send parent points in global numbering when a local-to-global mapping is given */
      PetscInt numParents;

      ierr = PetscSectionGetStorageSize(origParentSection,&numParents);CHKERRQ(ierr);
      ierr = PetscMalloc1(numParents,&globParents);CHKERRQ(ierr);
      ierr = ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);CHKERRQ(ierr);
    }
    else {
      globParents = origParents;
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents);CHKERRQ(ierr);
    if (original) {
      ierr = PetscFree(globParents);CHKERRQ(ierr);
    }
    ierr = PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs);CHKERRQ(ierr);
    /* Map received parents back to local numbering; unmapped points become negative */
    ierr = ISGlobalToLocalMappingApplyBlock(renumbering,IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
    {
      PetscInt  p;
      PetscBool valid = PETSC_TRUE;
      /* NOTE(review): %d is paired with a PetscInt here; %D would be the PETSc-conventional specifier -- confirm */
      for (p = 0; p < newParentSize; ++p) {
        if (newParents[p] < 0) {valid = PETSC_FALSE; ierr = PetscPrintf(PETSC_COMM_SELF, "Point %d not in overlap SF\n", p);CHKERRQ(ierr);}
      }
      if (!valid) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Invalid global to local map");
    }
#endif
    ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-parents_view", &flg);CHKERRQ(ierr);
    if (flg) {
      ierr = PetscPrintf(comm, "Serial Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = PetscPrintf(comm, "Parallel Parent Section: \n");CHKERRQ(ierr);
      ierr = PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
      ierr = PetscSFView(parentSF, NULL);CHKERRQ(ierr);
    }
    ierr = DMPlexSetTree(dmParallel,newParentSection,newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSectionDestroy(&newParentSection);CHKERRQ(ierr);
    ierr = PetscFree2(newParents,newChildIDs);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&parentSF);CHKERRQ(ierr);
  }
  pmesh->useAnchors = mesh->useAnchors;
  PetscFunctionReturn(0);
}

/* Build the point SF of the parallel mesh by voting on point ownership over the migration SF */
PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
{
  PetscMPIInt    rank, size;
  MPI_Comm       comm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dm, DM_CLASSID, 1);
  PetscValidHeaderSpecific(dmParallel, DM_CLASSID, 3);

  /* Create point SF for parallel mesh */
  ierr = PetscLogEventBegin(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)dm, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  {
    const PetscInt *leaves;
    PetscSFNode    *remotePoints, *rowners, *lowners;
    PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
    PetscInt        pStart, pEnd;

    ierr = DMPlexGetChart(dmParallel, &pStart, &pEnd);CHKERRQ(ierr);
1405 ierr = PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);CHKERRQ(ierr); 1406 ierr = PetscMalloc2(numRoots,&rowners,numLeaves,&lowners);CHKERRQ(ierr); 1407 for (p=0; p<numRoots; p++) { 1408 rowners[p].rank = -1; 1409 rowners[p].index = -1; 1410 } 1411 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1412 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1413 for (p = 0; p < numLeaves; ++p) { 1414 if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */ 1415 lowners[p].rank = rank; 1416 lowners[p].index = leaves ? leaves[p] : p; 1417 } else if (lowners[p].rank >= 0) { /* Point already claimed so flag so that MAXLOC does not listen to us */ 1418 lowners[p].rank = -2; 1419 lowners[p].index = -2; 1420 } 1421 } 1422 for (p=0; p<numRoots; p++) { /* Root must not participate in the rediction, flag so that MAXLOC does not use */ 1423 rowners[p].rank = -3; 1424 rowners[p].index = -3; 1425 } 1426 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1427 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);CHKERRQ(ierr); 1428 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1429 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners);CHKERRQ(ierr); 1430 for (p = 0; p < numLeaves; ++p) { 1431 if (lowners[p].rank < 0 || lowners[p].index < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cell partition corrupt: point not claimed"); 1432 if (lowners[p].rank != rank) ++numGhostPoints; 1433 } 1434 ierr = PetscMalloc1(numGhostPoints, &ghostPoints);CHKERRQ(ierr); 1435 ierr = PetscMalloc1(numGhostPoints, &remotePoints);CHKERRQ(ierr); 1436 for (p = 0, gp = 0; p < numLeaves; ++p) { 1437 if (lowners[p].rank != rank) { 1438 ghostPoints[gp] = leaves ? 
leaves[p] : p; 1439 remotePoints[gp].rank = lowners[p].rank; 1440 remotePoints[gp].index = lowners[p].index; 1441 ++gp; 1442 } 1443 } 1444 ierr = PetscFree2(rowners,lowners);CHKERRQ(ierr); 1445 ierr = PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);CHKERRQ(ierr); 1446 ierr = PetscSFSetFromOptions((dmParallel)->sf);CHKERRQ(ierr); 1447 } 1448 { 1449 PetscBool useCone, useClosure, useAnchors; 1450 1451 ierr = DMPlexGetAdjacencyUseCone(dm, &useCone);CHKERRQ(ierr); 1452 ierr = DMPlexGetAdjacencyUseClosure(dm, &useClosure);CHKERRQ(ierr); 1453 ierr = DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);CHKERRQ(ierr); 1454 ierr = DMPlexSetAdjacencyUseCone(dmParallel, useCone);CHKERRQ(ierr); 1455 ierr = DMPlexSetAdjacencyUseClosure(dmParallel, useClosure);CHKERRQ(ierr); 1456 ierr = DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);CHKERRQ(ierr); 1457 } 1458 ierr = PetscLogEventEnd(DMPLEX_DistributeSF,dm,0,0,0);CHKERRQ(ierr); 1459 PetscFunctionReturn(0); 1460 } 1461 1462 /*@ 1463 DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition? 1464 1465 Input Parameters: 1466 + dm - The DMPlex object 1467 - flg - Balance the partition? 1468 1469 Level: intermediate 1470 1471 .seealso: DMPlexDistribute(), DMPlexGetPartitionBalance() 1472 @*/ 1473 PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg) 1474 { 1475 DM_Plex *mesh = (DM_Plex *)dm->data; 1476 1477 PetscFunctionBegin; 1478 mesh->partitionBalance = flg; 1479 PetscFunctionReturn(0); 1480 } 1481 1482 /*@ 1483 DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition? 1484 1485 Input Parameter: 1486 + dm - The DMPlex object 1487 1488 Output Parameter: 1489 + flg - Balance the partition? 
1490 1491 Level: intermediate 1492 1493 .seealso: DMPlexDistribute(), DMPlexSetPartitionBalance() 1494 @*/ 1495 PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg) 1496 { 1497 DM_Plex *mesh = (DM_Plex *)dm->data; 1498 1499 PetscFunctionBegin; 1500 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1501 PetscValidIntPointer(flg, 2); 1502 *flg = mesh->partitionBalance; 1503 PetscFunctionReturn(0); 1504 } 1505 1506 /*@C 1507 DMPlexDerivePointSF - Build a point SF from an SF describing a point migration 1508 1509 Input Parameter: 1510 + dm - The source DMPlex object 1511 . migrationSF - The star forest that describes the parallel point remapping 1512 . ownership - Flag causing a vote to determine point ownership 1513 1514 Output Parameter: 1515 - pointSF - The star forest describing the point overlap in the remapped DM 1516 1517 Level: developer 1518 1519 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1520 @*/ 1521 PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF) 1522 { 1523 PetscMPIInt rank, size; 1524 PetscInt p, nroots, nleaves, idx, npointLeaves; 1525 PetscInt *pointLocal; 1526 const PetscInt *leaves; 1527 const PetscSFNode *roots; 1528 PetscSFNode *rootNodes, *leafNodes, *pointRemote; 1529 Vec shifts; 1530 const PetscInt numShifts = 13759; 1531 const PetscScalar *shift = NULL; 1532 const PetscBool shiftDebug = PETSC_FALSE; 1533 PetscBool balance; 1534 PetscErrorCode ierr; 1535 1536 PetscFunctionBegin; 1537 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1538 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) dm), &rank);CHKERRQ(ierr); 1539 ierr = MPI_Comm_size(PetscObjectComm((PetscObject) dm), &size);CHKERRQ(ierr); 1540 1541 ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr); 1542 ierr = PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);CHKERRQ(ierr); 1543 ierr = PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);CHKERRQ(ierr); 1544 if (ownership) { 1545 /* If balancing, 
we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */ 1546 if (balance) { 1547 PetscRandom r; 1548 1549 ierr = PetscRandomCreate(PETSC_COMM_SELF, &r);CHKERRQ(ierr); 1550 ierr = PetscRandomSetInterval(r, 0, 2467*size);CHKERRQ(ierr); 1551 ierr = VecCreate(PETSC_COMM_SELF, &shifts);CHKERRQ(ierr); 1552 ierr = VecSetSizes(shifts, numShifts, numShifts);CHKERRQ(ierr); 1553 ierr = VecSetType(shifts, VECSTANDARD);CHKERRQ(ierr); 1554 ierr = VecSetRandom(shifts, r);CHKERRQ(ierr); 1555 ierr = PetscRandomDestroy(&r);CHKERRQ(ierr); 1556 ierr = VecGetArrayRead(shifts, &shift);CHKERRQ(ierr); 1557 } 1558 1559 /* Point ownership vote: Process with highest rank owns shared points */ 1560 for (p = 0; p < nleaves; ++p) { 1561 if (shiftDebug) { 1562 ierr = PetscSynchronizedPrintf(PetscObjectComm((PetscObject) dm), "[%d] Point %D RemotePoint %D Shift %D MyRank %D\n", rank, leaves ? leaves[p] : p, roots[p].index, (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]), (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size);CHKERRQ(ierr); 1563 } 1564 /* Either put in a bid or we know we own it */ 1565 leafNodes[p].rank = (rank + (shift ? (PetscInt) PetscRealPart(shift[roots[p].index%numShifts]) : 0))%size; 1566 leafNodes[p].index = p; 1567 } 1568 for (p = 0; p < nroots; p++) { 1569 /* Root must not participate in the reduction, flag so that MAXLOC does not use */ 1570 rootNodes[p].rank = -3; 1571 rootNodes[p].index = -3; 1572 } 1573 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1574 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootNodes, MPI_MAXLOC);CHKERRQ(ierr); 1575 if (balance) { 1576 /* We've voted, now we need to get the rank. When we're balancing the partition, the "rank" in rootNotes is not 1577 * the rank but rather (rank + random)%size. 
So we do another reduction, voting the same way, but sending the 1578 * rank instead of the index. */ 1579 PetscSFNode *rootRanks = NULL; 1580 ierr = PetscMalloc1(nroots, &rootRanks);CHKERRQ(ierr); 1581 for (p = 0; p < nroots; p++) { 1582 rootRanks[p].rank = -3; 1583 rootRanks[p].index = -3; 1584 } 1585 for (p = 0; p < nleaves; p++) leafNodes[p].index = rank; 1586 ierr = PetscSFReduceBegin(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr); 1587 ierr = PetscSFReduceEnd(migrationSF, MPIU_2INT, leafNodes, rootRanks, MPI_MAXLOC);CHKERRQ(ierr); 1588 for (p = 0; p < nroots; p++) rootNodes[p].rank = rootRanks[p].index; 1589 ierr = PetscFree(rootRanks);CHKERRQ(ierr); 1590 } 1591 } else { 1592 for (p = 0; p < nroots; p++) { 1593 rootNodes[p].index = -1; 1594 rootNodes[p].rank = rank; 1595 }; 1596 for (p = 0; p < nleaves; p++) { 1597 /* Write new local id into old location */ 1598 if (roots[p].rank == rank) { 1599 rootNodes[roots[p].index].index = leaves ? leaves[p] : p; 1600 } 1601 } 1602 } 1603 ierr = PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1604 ierr = PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes);CHKERRQ(ierr); 1605 1606 for (npointLeaves = 0, p = 0; p < nleaves; p++) { 1607 if (leafNodes[p].rank != rank) npointLeaves++; 1608 } 1609 ierr = PetscMalloc1(npointLeaves, &pointLocal);CHKERRQ(ierr); 1610 ierr = PetscMalloc1(npointLeaves, &pointRemote);CHKERRQ(ierr); 1611 for (idx = 0, p = 0; p < nleaves; p++) { 1612 if (leafNodes[p].rank != rank) { 1613 pointLocal[idx] = p; 1614 pointRemote[idx] = leafNodes[p]; 1615 idx++; 1616 } 1617 } 1618 if (shift) { 1619 ierr = VecRestoreArrayRead(shifts, &shift);CHKERRQ(ierr); 1620 ierr = VecDestroy(&shifts);CHKERRQ(ierr); 1621 } 1622 if (shiftDebug) {ierr = PetscSynchronizedFlush(PetscObjectComm((PetscObject) dm), PETSC_STDOUT);CHKERRQ(ierr);} 1623 ierr = PetscSFCreate(PetscObjectComm((PetscObject) dm), pointSF);CHKERRQ(ierr); 1624 ierr = 
PetscSFSetFromOptions(*pointSF);CHKERRQ(ierr); 1625 ierr = PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1626 ierr = PetscFree2(rootNodes, leafNodes);CHKERRQ(ierr); 1627 PetscFunctionReturn(0); 1628 } 1629 1630 /*@C 1631 DMPlexMigrate - Migrates internal DM data over the supplied star forest 1632 1633 Input Parameter: 1634 + dm - The source DMPlex object 1635 . sf - The star forest communication context describing the migration pattern 1636 1637 Output Parameter: 1638 - targetDM - The target DMPlex object 1639 1640 Level: intermediate 1641 1642 .seealso: DMPlexDistribute(), DMPlexDistributeOverlap() 1643 @*/ 1644 PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM) 1645 { 1646 MPI_Comm comm; 1647 PetscInt dim, cdim, nroots; 1648 PetscSF sfPoint; 1649 ISLocalToGlobalMapping ltogMigration; 1650 ISLocalToGlobalMapping ltogOriginal = NULL; 1651 PetscBool flg; 1652 PetscErrorCode ierr; 1653 1654 PetscFunctionBegin; 1655 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1656 ierr = PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1657 ierr = PetscObjectGetComm((PetscObject) dm, &comm);CHKERRQ(ierr); 1658 ierr = DMGetDimension(dm, &dim);CHKERRQ(ierr); 1659 ierr = DMSetDimension(targetDM, dim);CHKERRQ(ierr); 1660 ierr = DMGetCoordinateDim(dm, &cdim);CHKERRQ(ierr); 1661 ierr = DMSetCoordinateDim(targetDM, cdim);CHKERRQ(ierr); 1662 1663 /* Check for a one-to-all distribution pattern */ 1664 ierr = DMGetPointSF(dm, &sfPoint);CHKERRQ(ierr); 1665 ierr = PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);CHKERRQ(ierr); 1666 if (nroots >= 0) { 1667 IS isOriginal; 1668 PetscInt n, size, nleaves; 1669 PetscInt *numbering_orig, *numbering_new; 1670 1671 /* Get the original point numbering */ 1672 ierr = DMPlexCreatePointNumbering(dm, &isOriginal);CHKERRQ(ierr); 1673 ierr = ISLocalToGlobalMappingCreateIS(isOriginal, <ogOriginal);CHKERRQ(ierr); 1674 ierr = 
ISLocalToGlobalMappingGetSize(ltogOriginal, &size);CHKERRQ(ierr); 1675 ierr = ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1676 /* Convert to positive global numbers */ 1677 for (n=0; n<size; n++) {if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n]+1);} 1678 /* Derive the new local-to-global mapping from the old one */ 1679 ierr = PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1680 ierr = PetscMalloc1(nleaves, &numbering_new);CHKERRQ(ierr); 1681 ierr = PetscSFBcastBegin(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1682 ierr = PetscSFBcastEnd(sf, MPIU_INT, (PetscInt *) numbering_orig, numbering_new);CHKERRQ(ierr); 1683 ierr = ISLocalToGlobalMappingCreate(comm, 1, nleaves, (const PetscInt*) numbering_new, PETSC_OWN_POINTER, <ogMigration);CHKERRQ(ierr); 1684 ierr = ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt**)&numbering_orig);CHKERRQ(ierr); 1685 ierr = ISDestroy(&isOriginal);CHKERRQ(ierr); 1686 } else { 1687 /* One-to-all distribution pattern: We can derive LToG from SF */ 1688 ierr = ISLocalToGlobalMappingCreateSF(sf, 0, <ogMigration);CHKERRQ(ierr); 1689 } 1690 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1691 if (flg) { 1692 ierr = PetscPrintf(comm, "Point renumbering for DM migration:\n");CHKERRQ(ierr); 1693 ierr = ISLocalToGlobalMappingView(ltogMigration, NULL);CHKERRQ(ierr); 1694 } 1695 /* Migrate DM data to target DM */ 1696 ierr = DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1697 ierr = DMPlexDistributeLabels(dm, sf, targetDM);CHKERRQ(ierr); 1698 ierr = DMPlexDistributeCoordinates(dm, sf, targetDM);CHKERRQ(ierr); 1699 ierr = DMPlexDistributeSetupHybrid(dm, sf, ltogMigration, targetDM);CHKERRQ(ierr); 1700 ierr = DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);CHKERRQ(ierr); 1701 ierr 
= ISLocalToGlobalMappingDestroy(<ogOriginal);CHKERRQ(ierr); 1702 ierr = ISLocalToGlobalMappingDestroy(<ogMigration);CHKERRQ(ierr); 1703 ierr = PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);CHKERRQ(ierr); 1704 PetscFunctionReturn(0); 1705 } 1706 1707 /*@C 1708 DMPlexDistribute - Distributes the mesh and any associated sections. 1709 1710 Not Collective 1711 1712 Input Parameter: 1713 + dm - The original DMPlex object 1714 - overlap - The overlap of partitions, 0 is the default 1715 1716 Output Parameter: 1717 + sf - The PetscSF used for point distribution, or NULL if not needed 1718 - dmParallel - The distributed DMPlex object 1719 1720 Note: If the mesh was not distributed, the output dmParallel will be NULL. 1721 1722 The user can control the definition of adjacency for the mesh using DMPlexSetAdjacencyUseCone() and 1723 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1724 representation on the mesh. 1725 1726 Level: intermediate 1727 1728 .keywords: mesh, elements 1729 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1730 @*/ 1731 PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel) 1732 { 1733 MPI_Comm comm; 1734 PetscPartitioner partitioner; 1735 IS cellPart; 1736 PetscSection cellPartSection; 1737 DM dmCoord; 1738 DMLabel lblPartition, lblMigration; 1739 PetscSF sfProcess, sfMigration, sfStratified, sfPoint; 1740 PetscBool flg, balance; 1741 PetscMPIInt rank, size, p; 1742 PetscErrorCode ierr; 1743 1744 PetscFunctionBegin; 1745 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1746 PetscValidLogicalCollectiveInt(dm, overlap, 2); 1747 if (sf) PetscValidPointer(sf,3); 1748 PetscValidPointer(dmParallel,4); 1749 1750 if (sf) *sf = NULL; 1751 *dmParallel = NULL; 1752 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1753 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1754 ierr = MPI_Comm_size(comm, 
&size);CHKERRQ(ierr); 1755 if (size == 1) PetscFunctionReturn(0); 1756 1757 ierr = PetscLogEventBegin(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1758 /* Create cell partition */ 1759 ierr = PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1760 ierr = PetscSectionCreate(comm, &cellPartSection);CHKERRQ(ierr); 1761 ierr = DMPlexGetPartitioner(dm, &partitioner);CHKERRQ(ierr); 1762 ierr = PetscPartitionerPartition(partitioner, dm, cellPartSection, &cellPart);CHKERRQ(ierr); 1763 { 1764 /* Convert partition to DMLabel */ 1765 PetscInt proc, pStart, pEnd, npoints, poffset; 1766 const PetscInt *points; 1767 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);CHKERRQ(ierr); 1768 ierr = ISGetIndices(cellPart, &points);CHKERRQ(ierr); 1769 ierr = PetscSectionGetChart(cellPartSection, &pStart, &pEnd);CHKERRQ(ierr); 1770 for (proc = pStart; proc < pEnd; proc++) { 1771 ierr = PetscSectionGetDof(cellPartSection, proc, &npoints);CHKERRQ(ierr); 1772 ierr = PetscSectionGetOffset(cellPartSection, proc, &poffset);CHKERRQ(ierr); 1773 for (p = poffset; p < poffset+npoints; p++) { 1774 ierr = DMLabelSetValue(lblPartition, points[p], proc);CHKERRQ(ierr); 1775 } 1776 } 1777 ierr = ISRestoreIndices(cellPart, &points);CHKERRQ(ierr); 1778 } 1779 ierr = DMPlexPartitionLabelClosure(dm, lblPartition);CHKERRQ(ierr); 1780 { 1781 /* Build a global process SF */ 1782 PetscSFNode *remoteProc; 1783 ierr = PetscMalloc1(size, &remoteProc);CHKERRQ(ierr); 1784 for (p = 0; p < size; ++p) { 1785 remoteProc[p].rank = p; 1786 remoteProc[p].index = rank; 1787 } 1788 ierr = PetscSFCreate(comm, &sfProcess);CHKERRQ(ierr); 1789 ierr = PetscObjectSetName((PetscObject) sfProcess, "Process SF");CHKERRQ(ierr); 1790 ierr = PetscSFSetGraph(sfProcess, size, size, NULL, PETSC_OWN_POINTER, remoteProc, PETSC_OWN_POINTER);CHKERRQ(ierr); 1791 } 1792 ierr = DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);CHKERRQ(ierr); 1793 ierr = DMPlexPartitionLabelInvert(dm, lblPartition, 
sfProcess, lblMigration);CHKERRQ(ierr); 1794 ierr = DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);CHKERRQ(ierr); 1795 /* Stratify the SF in case we are migrating an already parallel plex */ 1796 ierr = DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);CHKERRQ(ierr); 1797 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1798 sfMigration = sfStratified; 1799 ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1800 ierr = PetscOptionsHasName(((PetscObject) dm)->options,((PetscObject) dm)->prefix, "-partition_view", &flg);CHKERRQ(ierr); 1801 if (flg) { 1802 ierr = DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1803 ierr = PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); 1804 } 1805 1806 /* Create non-overlapping parallel DM and migrate internal data */ 1807 ierr = DMPlexCreate(comm, dmParallel);CHKERRQ(ierr); 1808 ierr = PetscObjectSetName((PetscObject) *dmParallel, "Parallel Mesh");CHKERRQ(ierr); 1809 ierr = DMPlexMigrate(dm, sfMigration, *dmParallel);CHKERRQ(ierr); 1810 1811 /* Build the point SF without overlap */ 1812 ierr = DMPlexGetPartitionBalance(dm, &balance);CHKERRQ(ierr); 1813 ierr = DMPlexSetPartitionBalance(*dmParallel, balance);CHKERRQ(ierr); 1814 ierr = DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);CHKERRQ(ierr); 1815 ierr = DMSetPointSF(*dmParallel, sfPoint);CHKERRQ(ierr); 1816 ierr = DMGetCoordinateDM(*dmParallel, &dmCoord);CHKERRQ(ierr); 1817 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1818 if (flg) {ierr = PetscSFView(sfPoint, PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);} 1819 1820 if (overlap > 0) { 1821 DM dmOverlap; 1822 PetscInt nroots, nleaves; 1823 PetscSFNode *newRemote; 1824 const PetscSFNode *oldRemote; 1825 PetscSF sfOverlap, sfOverlapPoint; 1826 /* Add the partition overlap to the distributed DM */ 1827 ierr = DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);CHKERRQ(ierr); 1828 ierr = 
DMDestroy(dmParallel);CHKERRQ(ierr); 1829 *dmParallel = dmOverlap; 1830 if (flg) { 1831 ierr = PetscPrintf(comm, "Overlap Migration SF:\n");CHKERRQ(ierr); 1832 ierr = PetscSFView(sfOverlap, NULL);CHKERRQ(ierr); 1833 } 1834 1835 /* Re-map the migration SF to establish the full migration pattern */ 1836 ierr = PetscSFGetGraph(sfMigration, &nroots, NULL, NULL, &oldRemote);CHKERRQ(ierr); 1837 ierr = PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);CHKERRQ(ierr); 1838 ierr = PetscMalloc1(nleaves, &newRemote);CHKERRQ(ierr); 1839 ierr = PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1840 ierr = PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote);CHKERRQ(ierr); 1841 ierr = PetscSFCreate(comm, &sfOverlapPoint);CHKERRQ(ierr); 1842 ierr = PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);CHKERRQ(ierr); 1843 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1844 ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr); 1845 sfMigration = sfOverlapPoint; 1846 } 1847 /* Cleanup Partition */ 1848 ierr = PetscSFDestroy(&sfProcess);CHKERRQ(ierr); 1849 ierr = DMLabelDestroy(&lblPartition);CHKERRQ(ierr); 1850 ierr = DMLabelDestroy(&lblMigration);CHKERRQ(ierr); 1851 ierr = PetscSectionDestroy(&cellPartSection);CHKERRQ(ierr); 1852 ierr = ISDestroy(&cellPart);CHKERRQ(ierr); 1853 /* Copy BC */ 1854 ierr = DMCopyBoundary(dm, *dmParallel);CHKERRQ(ierr); 1855 /* Create sfNatural */ 1856 if (dm->useNatural) { 1857 PetscSection section; 1858 1859 ierr = DMGetSection(dm, §ion);CHKERRQ(ierr); 1860 ierr = DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);CHKERRQ(ierr); 1861 ierr = DMSetUseNatural(*dmParallel, PETSC_TRUE);CHKERRQ(ierr); 1862 } 1863 /* Cleanup */ 1864 if (sf) {*sf = sfMigration;} 1865 else {ierr = PetscSFDestroy(&sfMigration);CHKERRQ(ierr);} 1866 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1867 ierr = 
PetscLogEventEnd(DMPLEX_Distribute,dm,0,0,0);CHKERRQ(ierr); 1868 PetscFunctionReturn(0); 1869 } 1870 1871 /*@C 1872 DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM. 1873 1874 Not Collective 1875 1876 Input Parameter: 1877 + dm - The non-overlapping distrbuted DMPlex object 1878 - overlap - The overlap of partitions 1879 1880 Output Parameter: 1881 + sf - The PetscSF used for point distribution 1882 - dmOverlap - The overlapping distributed DMPlex object, or NULL 1883 1884 Note: If the mesh was not distributed, the return value is NULL. 1885 1886 The user can control the definition of adjacency for the mesh using DMPlexGetAdjacencyUseCone() and 1887 DMPlexSetAdjacencyUseClosure(). They should choose the combination appropriate for the function 1888 representation on the mesh. 1889 1890 Level: intermediate 1891 1892 .keywords: mesh, elements 1893 .seealso: DMPlexCreate(), DMPlexDistributeByFace(), DMPlexSetAdjacencyUseCone(), DMPlexSetAdjacencyUseClosure() 1894 @*/ 1895 PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap) 1896 { 1897 MPI_Comm comm; 1898 PetscMPIInt size, rank; 1899 PetscSection rootSection, leafSection; 1900 IS rootrank, leafrank; 1901 DM dmCoord; 1902 DMLabel lblOverlap; 1903 PetscSF sfOverlap, sfStratified, sfPoint; 1904 PetscErrorCode ierr; 1905 1906 PetscFunctionBegin; 1907 PetscValidHeaderSpecific(dm, DM_CLASSID, 1); 1908 if (sf) PetscValidPointer(sf, 3); 1909 PetscValidPointer(dmOverlap, 4); 1910 1911 if (sf) *sf = NULL; 1912 *dmOverlap = NULL; 1913 ierr = PetscObjectGetComm((PetscObject)dm,&comm);CHKERRQ(ierr); 1914 ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr); 1915 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1916 if (size == 1) PetscFunctionReturn(0); 1917 1918 ierr = PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1919 /* Compute point overlap with neighbouring processes on the distributed DM */ 1920 ierr = 
PetscLogEventBegin(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1921 ierr = PetscSectionCreate(comm, &rootSection);CHKERRQ(ierr); 1922 ierr = PetscSectionCreate(comm, &leafSection);CHKERRQ(ierr); 1923 ierr = DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);CHKERRQ(ierr); 1924 ierr = DMPlexCreateOverlap(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);CHKERRQ(ierr); 1925 /* Convert overlap label to stratified migration SF */ 1926 ierr = DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);CHKERRQ(ierr); 1927 ierr = DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);CHKERRQ(ierr); 1928 ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr); 1929 sfOverlap = sfStratified; 1930 ierr = PetscObjectSetName((PetscObject) sfOverlap, "Overlap SF");CHKERRQ(ierr); 1931 ierr = PetscSFSetFromOptions(sfOverlap);CHKERRQ(ierr); 1932 1933 ierr = PetscSectionDestroy(&rootSection);CHKERRQ(ierr); 1934 ierr = PetscSectionDestroy(&leafSection);CHKERRQ(ierr); 1935 ierr = ISDestroy(&rootrank);CHKERRQ(ierr); 1936 ierr = ISDestroy(&leafrank);CHKERRQ(ierr); 1937 ierr = PetscLogEventEnd(DMPLEX_Partition,dm,0,0,0);CHKERRQ(ierr); 1938 1939 /* Build the overlapping DM */ 1940 ierr = DMPlexCreate(comm, dmOverlap);CHKERRQ(ierr); 1941 ierr = PetscObjectSetName((PetscObject) *dmOverlap, "Parallel Mesh");CHKERRQ(ierr); 1942 ierr = DMPlexMigrate(dm, sfOverlap, *dmOverlap);CHKERRQ(ierr); 1943 /* Build the new point SF */ 1944 ierr = DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 1945 ierr = DMSetPointSF(*dmOverlap, sfPoint);CHKERRQ(ierr); 1946 ierr = DMGetCoordinateDM(*dmOverlap, &dmCoord);CHKERRQ(ierr); 1947 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 1948 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 1949 /* Cleanup overlap partition */ 1950 ierr = DMLabelDestroy(&lblOverlap);CHKERRQ(ierr); 1951 if (sf) *sf = sfOverlap; 1952 else {ierr = PetscSFDestroy(&sfOverlap);CHKERRQ(ierr);} 1953 ierr = 
PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);CHKERRQ(ierr); 1954 PetscFunctionReturn(0); 1955 } 1956 1957 /*@C 1958 DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the 1959 root process of the original's communicator. 1960 1961 Input Parameters: 1962 . dm - the original DMPlex object 1963 1964 Output Parameters: 1965 + sf - the PetscSF used for point distribution (optional) 1966 - gatherMesh - the gathered DM object, or NULL 1967 1968 Level: intermediate 1969 1970 .keywords: mesh 1971 .seealso: DMPlexDistribute(), DMPlexGetRedundantDM() 1972 @*/ 1973 PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh) 1974 { 1975 MPI_Comm comm; 1976 PetscMPIInt size; 1977 PetscPartitioner oldPart, gatherPart; 1978 PetscErrorCode ierr; 1979 1980 PetscFunctionBegin; 1981 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 1982 PetscValidPointer(gatherMesh,2); 1983 *gatherMesh = NULL; 1984 if (sf) *sf = NULL; 1985 comm = PetscObjectComm((PetscObject)dm); 1986 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1987 if (size == 1) PetscFunctionReturn(0); 1988 ierr = DMPlexGetPartitioner(dm,&oldPart);CHKERRQ(ierr); 1989 ierr = PetscObjectReference((PetscObject)oldPart);CHKERRQ(ierr); 1990 ierr = PetscPartitionerCreate(comm,&gatherPart);CHKERRQ(ierr); 1991 ierr = PetscPartitionerSetType(gatherPart,PETSCPARTITIONERGATHER);CHKERRQ(ierr); 1992 ierr = DMPlexSetPartitioner(dm,gatherPart);CHKERRQ(ierr); 1993 ierr = DMPlexDistribute(dm,0,sf,gatherMesh);CHKERRQ(ierr); 1994 1995 ierr = DMPlexSetPartitioner(dm,oldPart);CHKERRQ(ierr); 1996 ierr = PetscPartitionerDestroy(&gatherPart);CHKERRQ(ierr); 1997 ierr = PetscPartitionerDestroy(&oldPart);CHKERRQ(ierr); 1998 PetscFunctionReturn(0); 1999 } 2000 2001 /*@C 2002 DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process. 2003 2004 Input Parameters: 2005 . 
dm - the original DMPlex object 2006 2007 Output Parameters: 2008 + sf - the PetscSF used for point distribution (optional) 2009 - redundantMesh - the redundant DM object, or NULL 2010 2011 Level: intermediate 2012 2013 .keywords: mesh 2014 .seealso: DMPlexDistribute(), DMPlexGetGatherDM() 2015 @*/ 2016 PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh) 2017 { 2018 MPI_Comm comm; 2019 PetscMPIInt size, rank; 2020 PetscInt pStart, pEnd, p; 2021 PetscInt numPoints = -1; 2022 PetscSF migrationSF, sfPoint, gatherSF; 2023 DM gatherDM, dmCoord; 2024 PetscSFNode *points; 2025 PetscErrorCode ierr; 2026 2027 PetscFunctionBegin; 2028 PetscValidHeaderSpecific(dm,DM_CLASSID,1); 2029 PetscValidPointer(redundantMesh,2); 2030 *redundantMesh = NULL; 2031 comm = PetscObjectComm((PetscObject)dm); 2032 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 2033 if (size == 1) { 2034 ierr = PetscObjectReference((PetscObject) dm);CHKERRQ(ierr); 2035 *redundantMesh = dm; 2036 if (sf) *sf = NULL; 2037 PetscFunctionReturn(0); 2038 } 2039 ierr = DMPlexGetGatherDM(dm,&gatherSF,&gatherDM);CHKERRQ(ierr); 2040 if (!gatherDM) PetscFunctionReturn(0); 2041 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 2042 ierr = DMPlexGetChart(gatherDM,&pStart,&pEnd);CHKERRQ(ierr); 2043 numPoints = pEnd - pStart; 2044 ierr = MPI_Bcast(&numPoints,1,MPIU_INT,0,comm);CHKERRQ(ierr); 2045 ierr = PetscMalloc1(numPoints,&points);CHKERRQ(ierr); 2046 ierr = PetscSFCreate(comm,&migrationSF);CHKERRQ(ierr); 2047 for (p = 0; p < numPoints; p++) { 2048 points[p].index = p; 2049 points[p].rank = 0; 2050 } 2051 ierr = PetscSFSetGraph(migrationSF,pEnd-pStart,numPoints,NULL,PETSC_OWN_POINTER,points,PETSC_OWN_POINTER);CHKERRQ(ierr); 2052 ierr = DMPlexCreate(comm, redundantMesh);CHKERRQ(ierr); 2053 ierr = PetscObjectSetName((PetscObject) *redundantMesh, "Redundant Mesh");CHKERRQ(ierr); 2054 ierr = DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);CHKERRQ(ierr); 2055 ierr = DMPlexCreatePointSF(*redundantMesh, 
migrationSF, PETSC_FALSE, &sfPoint);CHKERRQ(ierr); 2056 ierr = DMSetPointSF(*redundantMesh, sfPoint);CHKERRQ(ierr); 2057 ierr = DMGetCoordinateDM(*redundantMesh, &dmCoord);CHKERRQ(ierr); 2058 if (dmCoord) {ierr = DMSetPointSF(dmCoord, sfPoint);CHKERRQ(ierr);} 2059 ierr = PetscSFDestroy(&sfPoint);CHKERRQ(ierr); 2060 if (sf) { 2061 PetscSF tsf; 2062 2063 ierr = PetscSFCompose(gatherSF,migrationSF,&tsf);CHKERRQ(ierr); 2064 ierr = DMPlexStratifyMigrationSF(dm, tsf, sf);CHKERRQ(ierr); 2065 ierr = PetscSFDestroy(&tsf);CHKERRQ(ierr); 2066 } 2067 ierr = PetscSFDestroy(&migrationSF);CHKERRQ(ierr); 2068 ierr = PetscSFDestroy(&gatherSF);CHKERRQ(ierr); 2069 ierr = DMDestroy(&gatherDM);CHKERRQ(ierr); 2070 PetscFunctionReturn(0); 2071 } 2072