1 2 #include <petsc-private/isimpl.h> /*I "petscis.h" I*/ 3 4 PetscClassId IS_LTOGM_CLASSID; 5 6 #undef __FUNCT__ 7 #define __FUNCT__ "ISLocalToGlobalMappingGetSize" 8 /*@C 9 ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping. 10 11 Not Collective 12 13 Input Parameter: 14 . ltog - local to global mapping 15 16 Output Parameter: 17 . n - the number of entries in the local mapping 18 19 Level: advanced 20 21 Concepts: mapping^local to global 22 23 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate() 24 @*/ 25 PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n) 26 { 27 PetscFunctionBegin; 28 PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1); 29 PetscValidIntPointer(n,2); 30 *n = mapping->n; 31 PetscFunctionReturn(0); 32 } 33 34 #undef __FUNCT__ 35 #define __FUNCT__ "ISLocalToGlobalMappingView" 36 /*@C 37 ISLocalToGlobalMappingView - View a local to global mapping 38 39 Not Collective 40 41 Input Parameters: 42 + ltog - local to global mapping 43 - viewer - viewer 44 45 Level: advanced 46 47 Concepts: mapping^local to global 48 49 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate() 50 @*/ 51 PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer) 52 { 53 PetscInt i; 54 PetscMPIInt rank; 55 PetscBool iascii; 56 PetscErrorCode ierr; 57 58 PetscFunctionBegin; 59 PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1); 60 if (!viewer) { 61 ierr = PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);CHKERRQ(ierr); 62 } 63 PetscValidHeaderSpecific(viewer,PETSC_VIEWER_CLASSID,2); 64 65 ierr = MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);CHKERRQ(ierr); 66 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr); 67 if (iascii) { 68 ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr); 69 for (i=0; i<mapping->n; i++) { 70 ierr = 
PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr); 71 } 72 ierr = PetscViewerFlush(viewer);CHKERRQ(ierr); 73 ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr); 74 } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name); 75 PetscFunctionReturn(0); 76 } 77 78 #undef __FUNCT__ 79 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS" 80 /*@ 81 ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n) 82 ordering and a global parallel ordering. 83 84 Not collective 85 86 Input Parameter: 87 . is - index set containing the global numbers for each local number 88 89 Output Parameter: 90 . mapping - new mapping data structure 91 92 Level: advanced 93 94 Concepts: mapping^local to global 95 96 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate() 97 @*/ 98 PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping) 99 { 100 PetscErrorCode ierr; 101 PetscInt n; 102 const PetscInt *indices; 103 MPI_Comm comm; 104 105 PetscFunctionBegin; 106 PetscValidHeaderSpecific(is,IS_CLASSID,1); 107 PetscValidPointer(mapping,2); 108 109 ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr); 110 ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr); 111 ierr = ISGetIndices(is,&indices);CHKERRQ(ierr); 112 ierr = ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);CHKERRQ(ierr); 113 ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr); 114 PetscFunctionReturn(0); 115 } 116 117 #undef __FUNCT__ 118 #define __FUNCT__ "ISLocalToGlobalMappingCreateSF" 119 /*@C 120 ISLocalToGlobalMappingCreateSF - Creates a mapping between a local (0 to n) 121 ordering and a global parallel ordering. 

    Collective

    Input Parameters:
+   sf - star forest mapping contiguous local indices to (rank, offset)
-   start - first global index on this process

    Output Parameter:
.   mapping - new mapping data structure

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
@*/
PetscErrorCode ISLocalToGlobalMappingCreateSF(PetscSF sf,PetscInt start,ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;
  PetscInt       i,maxlocal,nroots,nleaves,*globals,*ltog;
  const PetscInt *ilocal;
  MPI_Comm       comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(sf,PETSCSF_CLASSID,1);
  PetscValidPointer(mapping,3);

  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,PETSC_NULL);CHKERRQ(ierr);
  /* size of the dense local space: with an explicit leaf list the largest leaf
     index determines it, otherwise the leaves are contiguous 0..nleaves-1 */
  if (ilocal) {for (i=0,maxlocal=0; i<nleaves; i++) maxlocal = PetscMax(maxlocal,ilocal[i]+1);}
  else maxlocal = nleaves;
  ierr = PetscMalloc(nroots*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = PetscMalloc(maxlocal*sizeof(PetscInt),&ltog);CHKERRQ(ierr);
  /* roots own the contiguous global range [start, start+nroots) */
  for (i=0; i<nroots; i++) globals[i] = start + i;
  /* local slots not covered by any leaf keep -1 (no global value) */
  for (i=0; i<maxlocal; i++) ltog[i] = -1;
  ierr = PetscSFBcastBegin(sf,MPIU_INT,globals,ltog);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,globals,ltog);CHKERRQ(ierr);
  /* ltog ownership transfers to the mapping (PETSC_OWN_POINTER); globals is scratch */
  ierr = ISLocalToGlobalMappingCreate(comm,maxlocal,ltog,PETSC_OWN_POINTER,mapping);CHKERRQ(ierr);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreate"
/*@
    ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.

    Not Collective, but communicator may have more than one process

    Input Parameters:
+   comm - MPI communicator
.
n - the number of local elements
.   indices - the global index for each local element, these do not need to be in increasing order (sorted)
-   mode - see PetscCopyMode

    Output Parameter:
.   mapping - new mapping data structure

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
@*/
PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;
  PetscInt       *in;

  PetscFunctionBegin;
  if (n) PetscValidIntPointer(indices,3); /* indices may be NULL only for an empty mapping */
  PetscValidPointer(mapping,4);

  *mapping = PETSC_NULL;
#if !defined(PETSC_USE_DYNAMIC_LIBRARIES)
  ierr = ISInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif

  ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping","Local to global mapping","IS",
                           cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);
  (*mapping)->n = n;
  /*
    Do not create the global to local mapping. This is only created if
    ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  if (mode == PETSC_COPY_VALUES) {
    /* take a private copy of the caller's index array */
    ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
    ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));CHKERRQ(ierr);
    (*mapping)->indices = in;
  } else if (mode == PETSC_OWN_POINTER) {
    /* mapping takes ownership; the array is PetscFree()d in ISLocalToGlobalMappingDestroy() */
    (*mapping)->indices = (PetscInt*)indices;
  } else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingBlock"
/*@
    ISLocalToGlobalMappingBlock - Creates a blocked index version of an
    ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
    and VecSetLocalToGlobalMappingBlock().

    Not Collective, but communicator may have more than one process

    Input Parameters:
+   inmap - original point-wise mapping
-   bs - block size

    Output Parameter:
.   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.
235 236 Level: advanced 237 238 Concepts: mapping^local to global 239 240 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS() 241 @*/ 242 PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap) 243 { 244 PetscErrorCode ierr; 245 PetscInt *ii,i,n; 246 247 PetscFunctionBegin; 248 PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1); 249 PetscValidPointer(outmap,3); 250 if (bs > 1) { 251 n = inmap->n/bs; 252 if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size"); 253 ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr); 254 for (i=0; i<n; i++) { 255 ii[i] = inmap->indices[bs*i]/bs; 256 } 257 ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);CHKERRQ(ierr); 258 } else { 259 ierr = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr); 260 *outmap = inmap; 261 } 262 PetscFunctionReturn(0); 263 } 264 265 #undef __FUNCT__ 266 #define __FUNCT__ "ISLocalToGlobalMappingUnBlock" 267 /*@ 268 ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked 269 ISLocalToGlobalMapping 270 271 Not Collective, but communicator may have more than one process 272 273 Input Parameter: 274 + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries. 275 - bs - block size 276 277 Output Parameter: 278 . 
outmap - pointwise mapping 279 280 Level: advanced 281 282 Concepts: mapping^local to global 283 284 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock() 285 @*/ 286 PetscErrorCode ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap) 287 { 288 PetscErrorCode ierr; 289 PetscInt *ii,i,n; 290 291 PetscFunctionBegin; 292 PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1); 293 PetscValidPointer(outmap,2); 294 if (bs > 1) { 295 n = inmap->n*bs; 296 ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr); 297 for (i=0; i<n; i++) { 298 ii[i] = inmap->indices[i/bs]*bs + (i%bs); 299 } 300 ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);CHKERRQ(ierr); 301 } else { 302 ierr = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr); 303 *outmap = inmap; 304 } 305 PetscFunctionReturn(0); 306 } 307 308 #undef __FUNCT__ 309 #define __FUNCT__ "ISLocalToGlobalMappingDestroy" 310 /*@ 311 ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n) 312 ordering and a global parallel ordering. 313 314 Note Collective 315 316 Input Parameters: 317 . 
mapping - mapping data structure

    Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!*mapping) PetscFunctionReturn(0);
  PetscValidHeaderSpecific((*mapping),IS_LTOGM_CLASSID,1);
  /* only the last reference actually frees the object; earlier callers just lose their handle */
  if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;PetscFunctionReturn(0);}
  ierr = PetscFree((*mapping)->indices);CHKERRQ(ierr);
  ierr = PetscFree((*mapping)->globals);CHKERRQ(ierr); /* lazily built by ISGlobalToLocalMappingSetUp_Private; may be 0 */
  ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
  *mapping = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
/*@
    ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
    a new index set using the global numbering defined in an ISLocalToGlobalMapping
    context.

    Not collective

    Input Parameters:
+   mapping - mapping between local and global numbering
-   is - index set in local numbering

    Output Parameters:
.
newis - index set in global numbering

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
@*/
PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
{
  PetscErrorCode ierr;
  PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
  const PetscInt *idxin;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  PetscValidHeaderSpecific(is,IS_CLASSID,2);
  PetscValidPointer(newis,3);

  ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
  ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
  idxmap = mapping->indices;

  ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    /* NOTE(review): only the upper bound is checked; a negative entry in the input IS
       would index before idxmap -- presumably callers never pass negative local
       indices here; confirm against callers */
    if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
    idxout[i] = idxmap[idxin[i]];
  }
  ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
  /* idxout ownership transfers to the new sequential IS */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
    and converts them to the global numbering.

    Synopsis:
    #include "petscis.h"
    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

    Not collective

    Input Parameters:
+   mapping - the local to global mapping context
.   N - number of integers
-   in - input indices in local numbering

    Output Parameter:
.   out - indices in global numbering

    Notes:
    The in and out array parameters may be identical.

    Level: advanced

.seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
          ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
          AOPetscToApplication(), ISGlobalToLocalMappingApply()

    Concepts: mapping^local to global

M*/

/* -----------------------------------------------------------------------------------------*/

#undef __FUNCT__
#define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
/*
    Creates the global fields in the ISLocalToGlobalMapping structure:
    an inverse lookup table globals[] over the range [globalstart,globalend]
    of the nonnegative global indices, with -1 marking "no local value".
*/
static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

  PetscFunctionBegin;
  end   = 0;
  start = PETSC_MAX_INT;

  /* find the [start,end] range spanned by the nonnegative global indices */
  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue; /* negative entries mean "no global value" */
    if (idx[i] < start) start = idx[i];
    if (idx[i] > end)   end   = idx[i];
  }
  if (start > end) {start = 0; end = -1;} /* no valid indices at all: empty range */
  mapping->globalstart = start;
  mapping->globalend   = end;

  /* inverse table: globals[g - start] = local index whose global number is g, or -1 */
  ierr = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  mapping->globals = globals;
  for (i=0; i<end-start+1; i++) {
    globals[i] = -1;
  }
  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue;
    globals[idx[i] - start] = i;
  }

  ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISGlobalToLocalMappingApply"
/*@
    ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
    specified with a global numbering.

    Not collective

    Input Parameters:
+   mapping - mapping between local and global numbering
.   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
           IS_GTOLM_DROP - drops the indices with no local value from the output list
.
n - number of global indices to map
-   idx - global indices to map

    Output Parameters:
+   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
-   idxout - local index of each global index, one must pass in an array long enough
             to hold all the indices. You can call ISGlobalToLocalMappingApply() with
             idxout == PETSC_NULL to determine the required length (returned in nout)
             and then allocate the required space and call ISGlobalToLocalMappingApply()
             a second time to set the values.

    Notes:
    Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

    This is not scalable in memory usage. Each processor requires O(Nglobal) size
    array to compute these.

    Level: advanced

    Developer Note: The manual page states that idx and idxout may be identical but the calling
       sequence declares idx as const so it cannot be the same as idxout.

    Concepts: mapping^global to local

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy()
@*/
PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
                                           PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
{
  PetscInt       i,*globals,nf = 0,tmp,start,end;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* the global-to-local lookup table is built lazily on first use */
  if (!mapping->globals) {
    ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
  }
  globals = mapping->globals;
  start   = mapping->globalstart;
  end     = mapping->globalend;

  if (type == IS_GTOLM_MASK) {
    /* masked: output has same length as input; unmapped entries become -1 */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) idxout[i] = idx[i];       /* negative indices pass through unchanged */
        else if (idx[i] < start) idxout[i] = -1;  /* outside the mapped range */
        else if (idx[i] > end)   idxout[i] = -1;
        else                     idxout[i] = globals[idx[i] - start];
      }
    }
    if (nout) *nout = n;
  } else {
    /* drop: emit (or, when idxout is NULL, just count) only indices with a local value */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue; /* in range but not present locally */
        idxout[nf++] = tmp;
      }
    } else {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        nf++;
      }
    }
    if (nout) *nout = nf;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
/*@C
    ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
      each index shared by more than one processor

    Collective on ISLocalToGlobalMapping

    Input Parameters:
.   mapping - the mapping from local to global indexing

    Output Parameter:
+   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each subdomain (processor)
-   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

    Level: advanced

    Concepts: mapping^local to global

    Fortran Usage:
$        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
$        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
          PetscInt indices[nproc][numprocmax],ierr)
        There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
        indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.
572 573 574 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(), 575 ISLocalToGlobalMappingRestoreInfo() 576 @*/ 577 PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[]) 578 { 579 PetscErrorCode ierr; 580 PetscMPIInt size,rank,tag1,tag2,tag3,*len,*source,imdex; 581 PetscInt i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices; 582 PetscInt *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc; 583 PetscInt cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned; 584 PetscInt node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp; 585 PetscInt first_procs,first_numprocs,*first_indices; 586 MPI_Request *recv_waits,*send_waits; 587 MPI_Status recv_status,*send_status,*recv_statuses; 588 MPI_Comm comm = ((PetscObject)mapping)->comm; 589 PetscBool debug = PETSC_FALSE; 590 591 PetscFunctionBegin; 592 PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1); 593 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 594 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 595 if (size == 1) { 596 *nproc = 0; 597 *procs = PETSC_NULL; 598 ierr = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr); 599 (*numprocs)[0] = 0; 600 ierr = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr); 601 (*indices)[0] = PETSC_NULL; 602 PetscFunctionReturn(0); 603 } 604 605 ierr = PetscOptionsGetBool(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);CHKERRQ(ierr); 606 607 /* 608 Notes on ISLocalToGlobalMappingGetInfo 609 610 globally owned node - the nodes that have been assigned to this processor in global 611 numbering, just for this routine. 612 613 nontrivial globally owned node - node assigned to this processor that is on a subdomain 614 boundary (i.e. 
is has more than one local owner) 615 616 locally owned node - node that exists on this processors subdomain 617 618 nontrivial locally owned node - node that is not in the interior (i.e. has more than one 619 local subdomain 620 */ 621 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr); 622 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr); 623 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr); 624 625 for (i=0; i<n; i++) { 626 if (lindices[i] > max) max = lindices[i]; 627 } 628 ierr = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr); 629 Ng++; 630 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 631 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 632 scale = Ng/size + 1; 633 ng = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng); 634 rstart = scale*rank; 635 636 /* determine ownership ranges of global indices */ 637 ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); 638 ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr); 639 640 /* determine owners of each local node */ 641 ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr); 642 for (i=0; i<n; i++) { 643 proc = lindices[i]/scale; /* processor that globally owns this index */ 644 nprocs[2*proc+1] = 1; /* processor globally owns at least one of ours */ 645 owner[i] = proc; 646 nprocs[2*proc]++; /* count of how many that processor globally owns of ours */ 647 } 648 nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1]; 649 ierr = PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);CHKERRQ(ierr); 650 651 /* inform other processors of number of messages and max length*/ 652 ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr); 653 ierr = PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);CHKERRQ(ierr); 654 655 /* post receives for owned rows */ 656 ierr = 
PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr); 657 ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 658 for (i=0; i<nrecvs; i++) { 659 ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr); 660 } 661 662 /* pack messages containing lists of local nodes to owners */ 663 ierr = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr); 664 ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr); 665 starts[0] = 0; 666 for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 667 for (i=0; i<n; i++) { 668 sends[starts[owner[i]]++] = lindices[i]; 669 sends[starts[owner[i]]++] = i; 670 } 671 ierr = PetscFree(owner);CHKERRQ(ierr); 672 starts[0] = 0; 673 for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 674 675 /* send the messages */ 676 ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr); 677 ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr); 678 cnt = 0; 679 for (i=0; i<size; i++) { 680 if (nprocs[2*i]) { 681 ierr = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr); 682 dest[cnt] = i; 683 cnt++; 684 } 685 } 686 ierr = PetscFree(starts);CHKERRQ(ierr); 687 688 /* wait on receives */ 689 ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr); 690 ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr); 691 cnt = nrecvs; 692 ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr); 693 ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr); 694 while (cnt) { 695 ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr); 696 /* unpack receives into our local space */ 697 ierr = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr); 698 source[imdex] = recv_status.MPI_SOURCE; 699 len[imdex] = len[imdex]/2; 700 /* count how many local owners for each of my global 
owned indices */ 701 for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++; 702 cnt--; 703 } 704 ierr = PetscFree(recv_waits);CHKERRQ(ierr); 705 706 /* count how many globally owned indices are on an edge multiplied by how many processors own them. */ 707 nowned = 0; 708 nownedm = 0; 709 for (i=0; i<ng; i++) { 710 if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;} 711 } 712 713 /* create single array to contain rank of all local owners of each globally owned index */ 714 ierr = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr); 715 ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr); 716 starts[0] = 0; 717 for (i=1; i<ng; i++) { 718 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 719 else starts[i] = starts[i-1]; 720 } 721 722 /* for each nontrival globally owned node list all arriving processors */ 723 for (i=0; i<nrecvs; i++) { 724 for (j=0; j<len[i]; j++) { 725 node = recvs[2*i*nmax+2*j]-rstart; 726 if (nownedsenders[node] > 1) { 727 ownedsenders[starts[node]++] = source[i]; 728 } 729 } 730 } 731 732 if (debug) { /* ----------------------------------- */ 733 starts[0] = 0; 734 for (i=1; i<ng; i++) { 735 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 736 else starts[i] = starts[i-1]; 737 } 738 for (i=0; i<ng; i++) { 739 if (nownedsenders[i] > 1) { 740 ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr); 741 for (j=0; j<nownedsenders[i]; j++) { 742 ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr); 743 } 744 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 745 } 746 } 747 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 748 }/* ----------------------------------- */ 749 750 /* wait on original sends */ 751 if (nsends) { 752 ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr); 753 ierr = 
MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr); 754 ierr = PetscFree(send_status);CHKERRQ(ierr); 755 } 756 ierr = PetscFree(send_waits);CHKERRQ(ierr); 757 ierr = PetscFree(sends);CHKERRQ(ierr); 758 ierr = PetscFree(nprocs);CHKERRQ(ierr); 759 760 /* pack messages to send back to local owners */ 761 starts[0] = 0; 762 for (i=1; i<ng; i++) { 763 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 764 else starts[i] = starts[i-1]; 765 } 766 nsends2 = nrecvs; 767 ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */ 768 for (i=0; i<nrecvs; i++) { 769 nprocs[i] = 1; 770 for (j=0; j<len[i]; j++) { 771 node = recvs[2*i*nmax+2*j]-rstart; 772 if (nownedsenders[node] > 1) { 773 nprocs[i] += 2 + nownedsenders[node]; 774 } 775 } 776 } 777 nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i]; 778 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr); 779 ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr); 780 starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1]; 781 /* 782 Each message is 1 + nprocs[i] long, and consists of 783 (0) the number of nodes being sent back 784 (1) the local node number, 785 (2) the number of processors sharing it, 786 (3) the processors sharing it 787 */ 788 for (i=0; i<nsends2; i++) { 789 cnt = 1; 790 sends2[starts2[i]] = 0; 791 for (j=0; j<len[i]; j++) { 792 node = recvs[2*i*nmax+2*j]-rstart; 793 if (nownedsenders[node] > 1) { 794 sends2[starts2[i]]++; 795 sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1]; 796 sends2[starts2[i]+cnt++] = nownedsenders[node]; 797 ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr); 798 cnt += nownedsenders[node]; 799 } 800 } 801 } 802 803 /* receive the message lengths */ 804 nrecvs2 = nsends; 805 ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr); 806 ierr = 
PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr); 807 ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 808 for (i=0; i<nrecvs2; i++) { 809 ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr); 810 } 811 812 /* send the message lengths */ 813 for (i=0; i<nsends2; i++) { 814 ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr); 815 } 816 817 /* wait on receives of lens */ 818 if (nrecvs2) { 819 ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr); 820 ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr); 821 ierr = PetscFree(recv_statuses);CHKERRQ(ierr); 822 } 823 ierr = PetscFree(recv_waits);CHKERRQ(ierr); 824 825 starts3[0] = 0; 826 nt = 0; 827 for (i=0; i<nrecvs2-1; i++) { 828 starts3[i+1] = starts3[i] + lens2[i]; 829 nt += lens2[i]; 830 } 831 if (nrecvs2) nt += lens2[nrecvs2-1]; 832 833 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr); 834 ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 835 for (i=0; i<nrecvs2; i++) { 836 ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr); 837 } 838 839 /* send the messages */ 840 ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr); 841 for (i=0; i<nsends2; i++) { 842 ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr); 843 } 844 845 /* wait on receives */ 846 if (nrecvs2) { 847 ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr); 848 ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr); 849 ierr = PetscFree(recv_statuses);CHKERRQ(ierr); 850 } 851 ierr = PetscFree(recv_waits);CHKERRQ(ierr); 852 ierr = PetscFree(nprocs);CHKERRQ(ierr); 853 854 if (debug) { /* ----------------------------------- */ 855 cnt = 0; 856 for (i=0; i<nrecvs2; i++) { 857 nt = recvs2[cnt++]; 858 for (j=0; j<nt; j++) { 
859 ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr); 860 for (k=0; k<recvs2[cnt+1]; k++) { 861 ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr); 862 } 863 cnt += 2 + recvs2[cnt+1]; 864 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 865 } 866 } 867 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 868 } /* ----------------------------------- */ 869 870 /* count number subdomains for each local node */ 871 ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); 872 ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr); 873 cnt = 0; 874 for (i=0; i<nrecvs2; i++) { 875 nt = recvs2[cnt++]; 876 for (j=0; j<nt; j++) { 877 for (k=0; k<recvs2[cnt+1]; k++) { 878 nprocs[recvs2[cnt+2+k]]++; 879 } 880 cnt += 2 + recvs2[cnt+1]; 881 } 882 } 883 nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0); 884 *nproc = nt; 885 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr); 886 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr); 887 ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr); 888 for (i=0;i<nt+1;i++) { (*indices)[i]=PETSC_NULL; } 889 ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr); 890 cnt = 0; 891 for (i=0; i<size; i++) { 892 if (nprocs[i] > 0) { 893 bprocs[i] = cnt; 894 (*procs)[cnt] = i; 895 (*numprocs)[cnt] = nprocs[i]; 896 ierr = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr); 897 cnt++; 898 } 899 } 900 901 /* make the list of subdomains for each nontrivial local node */ 902 ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr); 903 cnt = 0; 904 for (i=0; i<nrecvs2; i++) { 905 nt = recvs2[cnt++]; 906 for (j=0; j<nt; j++) { 907 for (k=0; k<recvs2[cnt+1]; k++) { 908 (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt]; 909 } 910 cnt += 2 + recvs2[cnt+1]; 911 } 912 } 913 ierr = PetscFree(bprocs);CHKERRQ(ierr); 
914 ierr = PetscFree(recvs2);CHKERRQ(ierr); 915 916 /* sort the node indexing by their global numbers */ 917 nt = *nproc; 918 for (i=0; i<nt; i++) { 919 ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr); 920 for (j=0; j<(*numprocs)[i]; j++) { 921 tmp[j] = lindices[(*indices)[i][j]]; 922 } 923 ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr); 924 ierr = PetscFree(tmp);CHKERRQ(ierr); 925 } 926 927 if (debug) { /* ----------------------------------- */ 928 nt = *nproc; 929 for (i=0; i<nt; i++) { 930 ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr); 931 for (j=0; j<(*numprocs)[i]; j++) { 932 ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr); 933 } 934 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 935 } 936 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 937 } /* ----------------------------------- */ 938 939 /* wait on sends */ 940 if (nsends2) { 941 ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr); 942 ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr); 943 ierr = PetscFree(send_status);CHKERRQ(ierr); 944 } 945 946 ierr = PetscFree(starts3);CHKERRQ(ierr); 947 ierr = PetscFree(dest);CHKERRQ(ierr); 948 ierr = PetscFree(send_waits);CHKERRQ(ierr); 949 950 ierr = PetscFree(nownedsenders);CHKERRQ(ierr); 951 ierr = PetscFree(ownedsenders);CHKERRQ(ierr); 952 ierr = PetscFree(starts);CHKERRQ(ierr); 953 ierr = PetscFree(starts2);CHKERRQ(ierr); 954 ierr = PetscFree(lens2);CHKERRQ(ierr); 955 956 ierr = PetscFree(source);CHKERRQ(ierr); 957 ierr = PetscFree(len);CHKERRQ(ierr); 958 ierr = PetscFree(recvs);CHKERRQ(ierr); 959 ierr = PetscFree(nprocs);CHKERRQ(ierr); 960 ierr = PetscFree(sends2);CHKERRQ(ierr); 961 962 /* put the information about myself as the first entry in the list */ 963 first_procs = (*procs)[0]; 964 first_numprocs = (*numprocs)[0]; 965 first_indices = 
(*indices)[0];
  /* swap this process's own entry into slot 0 so callers can rely on entry 0 describing the local rank */
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
/*@C
    ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

    Collective on ISLocalToGlobalMapping

    Input Parameters:
+   mapping - the mapping from local to global indexing
.   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each processor
-   indices - indices of local nodes shared with neighbor (sorted by global numbering)

    Level: advanced

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingGetInfo()
@*/
PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
{
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = PetscFree(*procs);CHKERRQ(ierr);
  ierr = PetscFree(*numprocs);CHKERRQ(ierr);
  if (*indices) {
    /* free slot 0 explicitly (the loop below starts at 1), then the remaining
       per-processor index arrays, then the array of pointers itself;
       PetscFree() of a PETSC_NULL entry is a no-op */
    ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);
    for (i=1; i<*nproc; i++) {
      ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);
    }
    ierr = PetscFree(*indices);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetIndices"
/*@C
   ISLocalToGlobalMappingGetIndices - Get global indices for every local point

   Not Collective

   Input Arguments:
.
ltog - local to global mapping

   Output Arguments:
.  array - array of global indices, one per local point; points at the mapping's
           internal storage, so return it with ISLocalToGlobalMappingRestoreIndices() and do not free it

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
@*/
PetscErrorCode ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(ltog,IS_LTOGM_CLASSID,1);
  PetscValidPointer(array,2);
  *array = ltog->indices; /* direct pointer to internal storage, no copy made */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingRestoreIndices"
/*@C
   ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()

   Not Collective

   Input Arguments:
+  ltog - local to global mapping
-  array - array of indices

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
@*/
PetscErrorCode ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(ltog,IS_LTOGM_CLASSID,1);
  PetscValidPointer(array,2);
  /* catch callers handing back a pointer that did not come from ISLocalToGlobalMappingGetIndices() */
  if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
  *array = PETSC_NULL; /* defend against use after restore */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingConcatenate"
/*@C
   ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings

   Not Collective

   Input Arguments:
+  comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
.  n - number of mappings to concatenate
-  ltogs - local to global mappings

   Output Arguments:
.
ltogcat - new mapping

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
{
  PetscInt       i,cnt,m,*idx;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
  if (n > 0) PetscValidPointer(ltogs,3);
  for (i=0; i<n; i++) PetscValidHeaderSpecific(ltogs[i],IS_LTOGM_CLASSID,3);
  PetscValidPointer(ltogcat,4);
  /* first pass: total length of the concatenated index list */
  for (cnt=0,i=0; i<n; i++) {
    ierr = ISLocalToGlobalMappingGetSize(ltogs[i],&m);CHKERRQ(ierr);
    cnt += m;
  }
  /* the +1 avoids a zero-byte allocation when n == 0 or every mapping is empty,
     matching the (size+1)*sizeof convention used elsewhere in this file */
  ierr = PetscMalloc((cnt+1)*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  /* second pass: copy each mapping's indices into the concatenated array */
  for (cnt=0,i=0; i<n; i++) {
    const PetscInt *subidx;
    ierr = ISLocalToGlobalMappingGetSize(ltogs[i],&m);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);CHKERRQ(ierr);
    ierr = PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);CHKERRQ(ierr);
    cnt += m;
  }
  /* PETSC_OWN_POINTER: the new mapping takes ownership of idx, so it is not freed here */
  ierr = ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}