#include <petscvec.h>   /*I "petscvec.h" I*/
#include <petsc-private/isimpl.h>    /*I "petscis.h" I*/

/* Class id for ISLocalToGlobalMapping objects; registered by ISInitializePackage() */
PetscClassId IS_LTOGM_CLASSID;

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetSize"
/*@C
    ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

    Not Collective

    Input Parameter:
.   ltog - local to global mapping

    Output Parameter:
.   n - the number of entries in the local mapping

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  PetscValidIntPointer(n,2);
  *n = mapping->n;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingView"
/*@C
    ISLocalToGlobalMappingView - View a local to global mapping

    Not Collective

    Input Parameters:
+   ltog - local to global mapping
-   viewer - viewer

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
{
  PetscInt       i;
  PetscMPIInt    rank;
  PetscBool      iascii;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* default to the stdout viewer on the mapping's communicator */
  if (!viewer) {
    ierr = PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);CHKERRQ(ierr);
  }
  PetscValidHeaderSpecific(viewer,PETSC_VIEWER_CLASSID,2);

  ierr = MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    /* each rank prints its "[rank] local global" triples in rank order */
    ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
    for (i=0; i<mapping->n; i++) {
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
    ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);
  } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
/*@
    ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.

    Not collective

    Input Parameter:
.   is - index set containing the global numbers for each local number

    Output Parameter:
.   mapping - new mapping data structure

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;
  PetscInt       n;
  const PetscInt *indices;
  MPI_Comm       comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(is,IS_CLASSID,1);
  PetscValidPointer(mapping,2);

  ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
  ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
  /* the mapping copies the indices, so the IS can be restored immediately */
  ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingCreate(comm,n,indices,PETSC_COPY_VALUES,mapping);CHKERRQ(ierr);
  ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingCreateSF"
/*@C
    ISLocalToGlobalMappingCreateSF - Creates a mapping between a local (0 to n)
    ordering and a global parallel ordering.
123 124 Collective 125 126 Input Parameter: 127 + sf - star forest mapping contiguous local indices to (rank, offset) 128 - start - first global index on this process 129 130 Output Parameter: 131 . mapping - new mapping data structure 132 133 Level: advanced 134 135 Concepts: mapping^local to global 136 137 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS() 138 @*/ 139 PetscErrorCode ISLocalToGlobalMappingCreateSF(PetscSF sf,PetscInt start,ISLocalToGlobalMapping *mapping) 140 { 141 PetscErrorCode ierr; 142 PetscInt i,maxlocal,nroots,nleaves,*globals,*ltog; 143 const PetscInt *ilocal; 144 MPI_Comm comm; 145 146 PetscFunctionBegin; 147 PetscValidHeaderSpecific(sf,PETSCSF_CLASSID,1); 148 PetscValidPointer(mapping,3); 149 150 ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr); 151 ierr = PetscSFGetGraph(sf,&nroots,&nleaves,&ilocal,PETSC_NULL);CHKERRQ(ierr); 152 if (ilocal) {for (i=0,maxlocal=0; i<nleaves; i++) maxlocal = PetscMax(maxlocal,ilocal[i]+1);} 153 else maxlocal = nleaves; 154 ierr = PetscMalloc(nroots*sizeof(PetscInt),&globals);CHKERRQ(ierr); 155 ierr = PetscMalloc(maxlocal*sizeof(PetscInt),<og);CHKERRQ(ierr); 156 for (i=0; i<nroots; i++) globals[i] = start + i; 157 for (i=0; i<maxlocal; i++) ltog[i] = -1; 158 ierr = PetscSFBcastBegin(sf,MPIU_INT,globals,ltog);CHKERRQ(ierr); 159 ierr = PetscSFBcastEnd(sf,MPIU_INT,globals,ltog);CHKERRQ(ierr); 160 ierr = ISLocalToGlobalMappingCreate(comm,maxlocal,ltog,PETSC_OWN_POINTER,mapping);CHKERRQ(ierr); 161 ierr = PetscFree(globals);CHKERRQ(ierr); 162 PetscFunctionReturn(0); 163 } 164 165 #undef __FUNCT__ 166 #define __FUNCT__ "ISLocalToGlobalMappingCreate" 167 /*@ 168 ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n) 169 ordering and a global parallel ordering. 170 171 Not Collective, but communicator may have more than one process 172 173 Input Parameters: 174 + comm - MPI communicator 175 . 
n - the number of local elements
.   indices - the global index for each local element
-   mode - see PetscCopyMode

    Output Parameter:
.   mapping - new mapping data structure

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
@*/
PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],PetscCopyMode mode,ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;
  PetscInt       *in;

  PetscFunctionBegin;
  if (n) PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);

  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ierr = ISInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif

  ierr = PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_CLASSID,0,"ISLocalToGlobalMapping","Local to global mapping","IS",
                           cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);CHKERRQ(ierr);
  (*mapping)->n = n;
  /*
     Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  if (mode == PETSC_COPY_VALUES) {
    /* take a private copy of the caller's indices */
    ierr = PetscMalloc(n*sizeof(PetscInt),&in);CHKERRQ(ierr);
    ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));CHKERRQ(ierr);
    (*mapping)->indices = in;
  } else if (mode == PETSC_OWN_POINTER) {
    /* the mapping takes ownership of the caller's array and will PetscFree() it */
    (*mapping)->indices = (PetscInt*)indices;
  } else SETERRQ(cm,PETSC_ERR_SUP,"Cannot currently use PETSC_USE_POINTER");
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingBlock"
/*@
    ISLocalToGlobalMappingBlock - Creates a blocked index version of an
       ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
       and VecSetLocalToGlobalMappingBlock().
227 228 Not Collective, but communicator may have more than one process 229 230 Input Parameters: 231 + inmap - original point-wise mapping 232 - bs - block size 233 234 Output Parameter: 235 . outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries. 236 237 Level: advanced 238 239 Concepts: mapping^local to global 240 241 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS() 242 @*/ 243 PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap) 244 { 245 PetscErrorCode ierr; 246 PetscInt *ii,i,n; 247 248 PetscFunctionBegin; 249 PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1); 250 PetscValidPointer(outmap,3); 251 if (bs > 1) { 252 n = inmap->n/bs; 253 if (n*bs != inmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size"); 254 ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr); 255 for (i=0; i<n; i++) { 256 ii[i] = inmap->indices[bs*i]/bs; 257 } 258 ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);CHKERRQ(ierr); 259 } else { 260 ierr = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr); 261 *outmap = inmap; 262 } 263 PetscFunctionReturn(0); 264 } 265 266 #undef __FUNCT__ 267 #define __FUNCT__ "ISLocalToGlobalMappingUnBlock" 268 /*@ 269 ISLocalToGlobalMappingUnBlock - Creates a scalar index version of a blocked 270 ISLocalToGlobalMapping 271 272 Not Collective, but communicator may have more than one process 273 274 Input Parameter: 275 + inmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries. 276 - bs - block size 277 278 Output Parameter: 279 . 
outmap - pointwise mapping 280 281 Level: advanced 282 283 Concepts: mapping^local to global 284 285 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingBlock() 286 @*/ 287 PetscErrorCode ISLocalToGlobalMappingUnBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap) 288 { 289 PetscErrorCode ierr; 290 PetscInt *ii,i,n; 291 292 PetscFunctionBegin; 293 PetscValidHeaderSpecific(inmap,IS_LTOGM_CLASSID,1); 294 PetscValidPointer(outmap,2); 295 if (bs > 1) { 296 n = inmap->n*bs; 297 ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr); 298 for (i=0; i<n; i++) { 299 ii[i] = inmap->indices[i/bs]*bs + (i%bs); 300 } 301 ierr = ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,PETSC_OWN_POINTER,outmap);CHKERRQ(ierr); 302 } else { 303 ierr = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr); 304 *outmap = inmap; 305 } 306 PetscFunctionReturn(0); 307 } 308 309 #undef __FUNCT__ 310 #define __FUNCT__ "ISLocalToGlobalMappingDestroy" 311 /*@ 312 ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n) 313 ordering and a global parallel ordering. 314 315 Note Collective 316 317 Input Parameters: 318 . 
mapping - mapping data structure

    Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!*mapping) PetscFunctionReturn(0);
  PetscValidHeaderSpecific((*mapping),IS_LTOGM_CLASSID,1);
  /* only the final reference actually destroys the object */
  if (--((PetscObject)(*mapping))->refct > 0) {*mapping = 0;PetscFunctionReturn(0);}
  ierr = PetscFree((*mapping)->indices);CHKERRQ(ierr);
  /* globals is the lazily built global-to-local table; may be unallocated (0) */
  ierr = PetscFree((*mapping)->globals);CHKERRQ(ierr);
  ierr = PetscHeaderDestroy(mapping);CHKERRQ(ierr);
  *mapping = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
/*@
    ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
    a new index set using the global numbering defined in an ISLocalToGlobalMapping
    context.

    Not collective

    Input Parameters:
+   mapping - mapping between local and global numbering
-   is - index set in local numbering

    Output Parameters:
.
newis - index set in global numbering

    Level: advanced

    Concepts: mapping^local to global

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
@*/
PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
{
  PetscErrorCode ierr;
  PetscInt       n,i,*idxmap,*idxout,Nmax;
  const PetscInt *idxin;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  PetscValidHeaderSpecific(is,IS_CLASSID,2);
  PetscValidPointer(newis,3);
  /* read mapping->n only AFTER the header has been validated; previously it was
     dereferenced in the declaration, before PetscValidHeaderSpecific() ran */
  Nmax = mapping->n;

  ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
  ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
  idxmap = mapping->indices;

  ierr = PetscMalloc(n*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    /* a negative local index previously caused an out-of-bounds read of idxmap;
       only the upper bound was checked */
    if (idxin[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d at %d is negative",idxin[i],i);
    if (idxin[i] >= Nmax) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
    idxout[i] = idxmap[idxin[i]];
  }
  ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
  /* idxout ownership transfers to the new IS */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,PETSC_OWN_POINTER,newis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
    and converts them to the global numbering.

    Synopsis:
    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

    Not collective

    Input Parameters:
+   mapping - the local to global mapping context
.   N - number of integers
-   in - input indices in local numbering

    Output Parameter:
.   out - indices in global numbering

    Notes:
    The in and out array parameters may be identical.

    Level: advanced

.seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
          ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
          AOPetscToApplication(), ISGlobalToLocalMappingApply()

    Concepts: mapping^local to global

M*/

/* -----------------------------------------------------------------------------------------*/

#undef __FUNCT__
#define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
/*
    Creates the global fields in the ISLocalToGlobalMapping structure: a dense
    inverse table used by ISGlobalToLocalMappingApply(), built lazily on first use.
*/
static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
{
  PetscErrorCode ierr;
  PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

  PetscFunctionBegin;
  end   = 0;
  start = PETSC_MAX_INT;

  /* find the range [start,end] of global indices referenced locally;
     negative entries are treated as "no global index" and skipped */
  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue;
    if (idx[i] < start) start = idx[i];
    if (idx[i] > end) end = idx[i];
  }
  if (start > end) {start = 0; end = -1;}  /* no nonnegative indices: empty range */
  mapping->globalstart = start;
  mapping->globalend   = end;

  /* dense inverse table: globals[g-start] = local index owning global g, or -1 */
  ierr = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  mapping->globals = globals;
  for (i=0; i<end-start+1; i++) {
    globals[i] = -1;
  }
  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue;
    globals[idx[i] - start] = i;
  }

  ierr = PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISGlobalToLocalMappingApply"
/*@
    ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
    specified with a global numbering.

    Not collective

    Input Parameters:
+   mapping - mapping between local and global numbering
.   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
           IS_GTOLM_DROP - drops the indices with no local value from the output list
.
n - number of global indices to map
-   idx - global indices to map

    Output Parameters:
+   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
-   idxout - local index of each global index, one must pass in an array long enough
             to hold all the indices. You can call ISGlobalToLocalMappingApply() with
             idxout == PETSC_NULL to determine the required length (returned in nout)
             and then allocate the required space and call ISGlobalToLocalMappingApply()
             a second time to set the values.

    Notes:
    Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

    This is not scalable in memory usage. Each processor requires O(Nglobal) size
    array to compute these.

    Level: advanced

    Developer Note: The manual page states that idx and idxout may be identical but the calling
       sequence declares idx as const so it cannot be the same as idxout.

    Concepts: mapping^global to local

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy()
@*/
PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
                                           PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
{
  PetscInt       i,*globals,nf = 0,tmp,start,end;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1);
  /* the global-to-local inverse table is built lazily on first use */
  if (!mapping->globals) {
    ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
  }
  globals = mapping->globals;
  start   = mapping->globalstart;
  end     = mapping->globalend;

  if (type == IS_GTOLM_MASK) {
    /* output keeps one entry per input; unmapped entries become -1 */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) idxout[i] = idx[i];     /* negative entries pass through unchanged */
        else if (idx[i] < start) idxout[i] = -1;
        else if (idx[i] > end) idxout[i] = -1;
        else idxout[i] = globals[idx[i] - start];
      }
    }
    if (nout) *nout = n;
  } else {
    /* IS_GTOLM_DROP: compact the output, skipping entries with no local value */
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        idxout[nf++] = tmp;
      }
    } else {
      /* counting-only pass when the caller just wants the required length */
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        nf++;
      }
    }
    if (nout) *nout = nf;
  }

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
/*@C
    ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
      each index shared by more than one processor

    Collective on ISLocalToGlobalMapping

    Input Parameters:
.   mapping - the mapping from local to global indexing

    Output Parameter:
+   nproc - number of processors that are connected to this one
.   proc - neighboring processors
.   numproc - number of indices for each subdomain (processor)
-   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

    Level: advanced

    Concepts: mapping^local to global

    Fortran Usage:
$        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by
$        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
          PetscInt indices[nproc][numprocmax],ierr)
        There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and
        indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.
572 573 574 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(), 575 ISLocalToGlobalMappingRestoreInfo() 576 @*/ 577 PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[]) 578 { 579 PetscErrorCode ierr; 580 PetscMPIInt size,rank,tag1,tag2,tag3,*len,*source,imdex; 581 PetscInt i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices; 582 PetscInt *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc; 583 PetscInt cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned; 584 PetscInt node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp; 585 PetscInt first_procs,first_numprocs,*first_indices; 586 MPI_Request *recv_waits,*send_waits; 587 MPI_Status recv_status,*send_status,*recv_statuses; 588 MPI_Comm comm = ((PetscObject)mapping)->comm; 589 PetscBool debug = PETSC_FALSE; 590 591 PetscFunctionBegin; 592 PetscValidHeaderSpecific(mapping,IS_LTOGM_CLASSID,1); 593 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 594 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 595 if (size == 1) { 596 *nproc = 0; 597 *procs = PETSC_NULL; 598 ierr = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr); 599 (*numprocs)[0] = 0; 600 ierr = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr); 601 (*indices)[0] = PETSC_NULL; 602 PetscFunctionReturn(0); 603 } 604 605 ierr = PetscOptionsGetBool(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);CHKERRQ(ierr); 606 607 /* 608 Notes on ISLocalToGlobalMappingGetInfo 609 610 globally owned node - the nodes that have been assigned to this processor in global 611 numbering, just for this routine. 612 613 nontrivial globally owned node - node assigned to this processor that is on a subdomain 614 boundary (i.e. 
is has more than one local owner) 615 616 locally owned node - node that exists on this processors subdomain 617 618 nontrivial locally owned node - node that is not in the interior (i.e. has more than one 619 local subdomain 620 */ 621 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr); 622 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr); 623 ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr); 624 625 for (i=0; i<n; i++) { 626 if (lindices[i] > max) max = lindices[i]; 627 } 628 ierr = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr); 629 Ng++; 630 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 631 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 632 scale = Ng/size + 1; 633 ng = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng); 634 rstart = scale*rank; 635 636 /* determine ownership ranges of global indices */ 637 ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); 638 ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr); 639 640 /* determine owners of each local node */ 641 ierr = PetscMalloc(n*sizeof(PetscInt),&owner);CHKERRQ(ierr); 642 for (i=0; i<n; i++) { 643 proc = lindices[i]/scale; /* processor that globally owns this index */ 644 nprocs[2*proc+1] = 1; /* processor globally owns at least one of ours */ 645 owner[i] = proc; 646 nprocs[2*proc]++; /* count of how many that processor globally owns of ours */ 647 } 648 nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1]; 649 ierr = PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);CHKERRQ(ierr); 650 651 /* inform other processors of number of messages and max length*/ 652 ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr); 653 ierr = PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);CHKERRQ(ierr); 654 655 /* post receives for owned rows */ 656 ierr = 
PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr); 657 ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 658 for (i=0; i<nrecvs; i++) { 659 ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr); 660 } 661 662 /* pack messages containing lists of local nodes to owners */ 663 ierr = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr); 664 ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr); 665 starts[0] = 0; 666 for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 667 for (i=0; i<n; i++) { 668 sends[starts[owner[i]]++] = lindices[i]; 669 sends[starts[owner[i]]++] = i; 670 } 671 ierr = PetscFree(owner);CHKERRQ(ierr); 672 starts[0] = 0; 673 for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];} 674 675 /* send the messages */ 676 ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr); 677 ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr); 678 cnt = 0; 679 for (i=0; i<size; i++) { 680 if (nprocs[2*i]) { 681 ierr = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr); 682 dest[cnt] = i; 683 cnt++; 684 } 685 } 686 ierr = PetscFree(starts);CHKERRQ(ierr); 687 688 /* wait on receives */ 689 ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);CHKERRQ(ierr); 690 ierr = PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);CHKERRQ(ierr); 691 cnt = nrecvs; 692 ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr); 693 ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr); 694 while (cnt) { 695 ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr); 696 /* unpack receives into our local space */ 697 ierr = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr); 698 source[imdex] = recv_status.MPI_SOURCE; 699 len[imdex] = len[imdex]/2; 700 /* count how many local owners for each of my global 
owned indices */ 701 for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++; 702 cnt--; 703 } 704 ierr = PetscFree(recv_waits);CHKERRQ(ierr); 705 706 /* count how many globally owned indices are on an edge multiplied by how many processors own them. */ 707 nowned = 0; 708 nownedm = 0; 709 for (i=0; i<ng; i++) { 710 if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;} 711 } 712 713 /* create single array to contain rank of all local owners of each globally owned index */ 714 ierr = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr); 715 ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr); 716 starts[0] = 0; 717 for (i=1; i<ng; i++) { 718 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 719 else starts[i] = starts[i-1]; 720 } 721 722 /* for each nontrival globally owned node list all arriving processors */ 723 for (i=0; i<nrecvs; i++) { 724 for (j=0; j<len[i]; j++) { 725 node = recvs[2*i*nmax+2*j]-rstart; 726 if (nownedsenders[node] > 1) { 727 ownedsenders[starts[node]++] = source[i]; 728 } 729 } 730 } 731 732 if (debug) { /* ----------------------------------- */ 733 starts[0] = 0; 734 for (i=1; i<ng; i++) { 735 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 736 else starts[i] = starts[i-1]; 737 } 738 for (i=0; i<ng; i++) { 739 if (nownedsenders[i] > 1) { 740 ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr); 741 for (j=0; j<nownedsenders[i]; j++) { 742 ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr); 743 } 744 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 745 } 746 } 747 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 748 }/* ----------------------------------- */ 749 750 /* wait on original sends */ 751 if (nsends) { 752 ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr); 753 ierr = 
MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr); 754 ierr = PetscFree(send_status);CHKERRQ(ierr); 755 } 756 ierr = PetscFree(send_waits);CHKERRQ(ierr); 757 ierr = PetscFree(sends);CHKERRQ(ierr); 758 ierr = PetscFree(nprocs);CHKERRQ(ierr); 759 760 /* pack messages to send back to local owners */ 761 starts[0] = 0; 762 for (i=1; i<ng; i++) { 763 if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1]; 764 else starts[i] = starts[i-1]; 765 } 766 nsends2 = nrecvs; 767 ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */ 768 for (i=0; i<nrecvs; i++) { 769 nprocs[i] = 1; 770 for (j=0; j<len[i]; j++) { 771 node = recvs[2*i*nmax+2*j]-rstart; 772 if (nownedsenders[node] > 1) { 773 nprocs[i] += 2 + nownedsenders[node]; 774 } 775 } 776 } 777 nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i]; 778 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr); 779 ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr); 780 starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1]; 781 /* 782 Each message is 1 + nprocs[i] long, and consists of 783 (0) the number of nodes being sent back 784 (1) the local node number, 785 (2) the number of processors sharing it, 786 (3) the processors sharing it 787 */ 788 for (i=0; i<nsends2; i++) { 789 cnt = 1; 790 sends2[starts2[i]] = 0; 791 for (j=0; j<len[i]; j++) { 792 node = recvs[2*i*nmax+2*j]-rstart; 793 if (nownedsenders[node] > 1) { 794 sends2[starts2[i]]++; 795 sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1]; 796 sends2[starts2[i]+cnt++] = nownedsenders[node]; 797 ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr); 798 cnt += nownedsenders[node]; 799 } 800 } 801 } 802 803 /* receive the message lengths */ 804 nrecvs2 = nsends; 805 ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr); 806 ierr = 
PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr); 807 ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 808 for (i=0; i<nrecvs2; i++) { 809 ierr = MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);CHKERRQ(ierr); 810 } 811 812 /* send the message lengths */ 813 for (i=0; i<nsends2; i++) { 814 ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr); 815 } 816 817 /* wait on receives of lens */ 818 if (nrecvs2) { 819 ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr); 820 ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr); 821 ierr = PetscFree(recv_statuses);CHKERRQ(ierr); 822 } 823 ierr = PetscFree(recv_waits); 824 825 starts3[0] = 0; 826 nt = 0; 827 for (i=0; i<nrecvs2-1; i++) { 828 starts3[i+1] = starts3[i] + lens2[i]; 829 nt += lens2[i]; 830 } 831 nt += lens2[nrecvs2-1]; 832 833 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr); 834 ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 835 for (i=0; i<nrecvs2; i++) { 836 ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr); 837 } 838 839 /* send the messages */ 840 ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr); 841 for (i=0; i<nsends2; i++) { 842 ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr); 843 } 844 845 /* wait on receives */ 846 if (nrecvs2) { 847 ierr = PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr); 848 ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr); 849 ierr = PetscFree(recv_statuses);CHKERRQ(ierr); 850 } 851 ierr = PetscFree(recv_waits);CHKERRQ(ierr); 852 ierr = PetscFree(nprocs);CHKERRQ(ierr); 853 854 if (debug) { /* ----------------------------------- */ 855 cnt = 0; 856 for (i=0; i<nrecvs2; i++) { 857 nt = recvs2[cnt++]; 858 for (j=0; j<nt; j++) { 859 ierr = 
PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr); 860 for (k=0; k<recvs2[cnt+1]; k++) { 861 ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr); 862 } 863 cnt += 2 + recvs2[cnt+1]; 864 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 865 } 866 } 867 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 868 } /* ----------------------------------- */ 869 870 /* count number subdomains for each local node */ 871 ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); 872 ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr); 873 cnt = 0; 874 for (i=0; i<nrecvs2; i++) { 875 nt = recvs2[cnt++]; 876 for (j=0; j<nt; j++) { 877 for (k=0; k<recvs2[cnt+1]; k++) { 878 nprocs[recvs2[cnt+2+k]]++; 879 } 880 cnt += 2 + recvs2[cnt+1]; 881 } 882 } 883 nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0); 884 *nproc = nt; 885 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr); 886 ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr); 887 ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr); 888 ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr); 889 cnt = 0; 890 for (i=0; i<size; i++) { 891 if (nprocs[i] > 0) { 892 bprocs[i] = cnt; 893 (*procs)[cnt] = i; 894 (*numprocs)[cnt] = nprocs[i]; 895 ierr = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr); 896 cnt++; 897 } 898 } 899 900 /* make the list of subdomains for each nontrivial local node */ 901 ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr); 902 cnt = 0; 903 for (i=0; i<nrecvs2; i++) { 904 nt = recvs2[cnt++]; 905 for (j=0; j<nt; j++) { 906 for (k=0; k<recvs2[cnt+1]; k++) { 907 (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt]; 908 } 909 cnt += 2 + recvs2[cnt+1]; 910 } 911 } 912 ierr = PetscFree(bprocs);CHKERRQ(ierr); 913 ierr = PetscFree(recvs2);CHKERRQ(ierr); 914 915 /* sort the 
node indexing by their global numbers */ 916 nt = *nproc; 917 for (i=0; i<nt; i++) { 918 ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr); 919 for (j=0; j<(*numprocs)[i]; j++) { 920 tmp[j] = lindices[(*indices)[i][j]]; 921 } 922 ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr); 923 ierr = PetscFree(tmp);CHKERRQ(ierr); 924 } 925 926 if (debug) { /* ----------------------------------- */ 927 nt = *nproc; 928 for (i=0; i<nt; i++) { 929 ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr); 930 for (j=0; j<(*numprocs)[i]; j++) { 931 ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr); 932 } 933 ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr); 934 } 935 ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr); 936 } /* ----------------------------------- */ 937 938 /* wait on sends */ 939 if (nsends2) { 940 ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr); 941 ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr); 942 ierr = PetscFree(send_status);CHKERRQ(ierr); 943 } 944 945 ierr = PetscFree(starts3);CHKERRQ(ierr); 946 ierr = PetscFree(dest);CHKERRQ(ierr); 947 ierr = PetscFree(send_waits);CHKERRQ(ierr); 948 949 ierr = PetscFree(nownedsenders);CHKERRQ(ierr); 950 ierr = PetscFree(ownedsenders);CHKERRQ(ierr); 951 ierr = PetscFree(starts);CHKERRQ(ierr); 952 ierr = PetscFree(starts2);CHKERRQ(ierr); 953 ierr = PetscFree(lens2);CHKERRQ(ierr); 954 955 ierr = PetscFree(source);CHKERRQ(ierr); 956 ierr = PetscFree(len);CHKERRQ(ierr); 957 ierr = PetscFree(recvs);CHKERRQ(ierr); 958 ierr = PetscFree(nprocs);CHKERRQ(ierr); 959 ierr = PetscFree(sends2);CHKERRQ(ierr); 960 961 /* put the information about myself as the first entry in the list */ 962 first_procs = (*procs)[0]; 963 first_numprocs = (*numprocs)[0]; 964 first_indices = (*indices)[0]; 965 for (i=0; i<*nproc; i++) { 966 if ((*procs)[i] == rank) { 
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
/*@C
    ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

    Collective on ISLocalToGlobalMapping

    Input Parameters:
+   mapping - the mapping from local to global indexing
.   nproc - number of processors that are connected to this one, as set by ISLocalToGlobalMappingGetInfo()
.   procs - neighboring processors, as set by ISLocalToGlobalMappingGetInfo()
.   numprocs - number of indices for each processor, as set by ISLocalToGlobalMappingGetInfo()
-   indices - indices of local nodes shared with neighbor, as set by ISLocalToGlobalMappingGetInfo()

    Level: advanced

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingGetInfo()
@*/
PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
{
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = PetscFree(*procs);CHKERRQ(ierr);
  ierr = PetscFree(*numprocs);CHKERRQ(ierr);
  if (*indices) {
    /* Each (*indices)[i] was allocated with its own PetscMalloc() in
       ISLocalToGlobalMappingGetInfo(), so every entry must be freed
       individually before the array of pointers itself is freed.
       NOTE(review): if *nproc is 0, (*indices)[0] was never assigned by
       GetInfo() and this would free an uninitialized pointer — confirm
       *nproc >= 1 always holds (each rank shares nodes with itself). */
    ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);
    for (i=1; i<*nproc; i++) {
      ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);
    }
    ierr = PetscFree(*indices);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingGetIndices"
/*@C
   ISLocalToGlobalMappingGetIndices - Get global indices for every local point

   Not Collective

   Input Arguments:
.  ltog - local to global mapping

   Output Arguments:
.  array - array of indices; this is a direct pointer to the mapping's internal
           storage, so the caller must not modify or free it, and must return it
           with ISLocalToGlobalMappingRestoreIndices()

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingRestoreIndices()
@*/
PetscErrorCode ISLocalToGlobalMappingGetIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(ltog,IS_LTOGM_CLASSID,1);
  PetscValidPointer(array,2);
  /* Hand out the internal indices array directly; no copy is made. */
  *array = ltog->indices;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingRestoreIndices"
/*@C
   ISLocalToGlobalMappingRestoreIndices - Restore indices obtained with ISLocalToGlobalMappingGetIndices()

   Not Collective

   Input Arguments:
+  ltog - local to global mapping
-  array - array of indices, as returned by ISLocalToGlobalMappingGetIndices()

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingGetIndices()
@*/
PetscErrorCode ISLocalToGlobalMappingRestoreIndices(ISLocalToGlobalMapping ltog,const PetscInt **array)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(ltog,IS_LTOGM_CLASSID,1);
  PetscValidPointer(array,2);
  /* Catch callers that return a pointer other than the one handed out by
     ISLocalToGlobalMappingGetIndices(); nothing is freed here because the
     mapping still owns the storage. */
  if (*array != ltog->indices) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Trying to return mismatched pointer");
  *array = PETSC_NULL;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "ISLocalToGlobalMappingConcatenate"
/*@C
   ISLocalToGlobalMappingConcatenate - Create a new mapping that concatenates a list of mappings

   Not Collective

   Input Arguments:
+  comm - communicator for the new mapping, must contain the communicator of every mapping to concatenate
.  n - number of mappings to concatenate
-  ltogs - local to global mappings

   Output Arguments:
.  ltogcat - new mapping

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate()
@*/
PetscErrorCode ISLocalToGlobalMappingConcatenate(MPI_Comm comm,PetscInt n,const ISLocalToGlobalMapping ltogs[],ISLocalToGlobalMapping *ltogcat)
{
  PetscInt       i,cnt,m,*idx;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (n < 0) SETERRQ1(comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have a non-negative number of mappings, given %D",n);
  if (n > 0) PetscValidPointer(ltogs,3);
  for (i=0; i<n; i++) PetscValidHeaderSpecific(ltogs[i],IS_LTOGM_CLASSID,3);
  PetscValidPointer(ltogcat,4);
  /* First pass: total the sizes of all mappings so one contiguous index
     array can be allocated. */
  for (cnt=0,i=0; i<n; i++) {
    ierr = ISLocalToGlobalMappingGetSize(ltogs[i],&m);CHKERRQ(ierr);
    cnt += m;
  }
  ierr = PetscMalloc(cnt*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  /* Second pass: copy each mapping's indices, back to back, into idx. */
  for (cnt=0,i=0; i<n; i++) {
    const PetscInt *subidx;
    ierr = ISLocalToGlobalMappingGetSize(ltogs[i],&m);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingGetIndices(ltogs[i],&subidx);CHKERRQ(ierr);
    ierr = PetscMemcpy(&idx[cnt],subidx,m*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingRestoreIndices(ltogs[i],&subidx);CHKERRQ(ierr);
    cnt += m;
  }
  /* PETSC_OWN_POINTER transfers ownership of idx to the new mapping, so it
     must not be freed here. */
  ierr = ISLocalToGlobalMappingCreate(comm,cnt,idx,PETSC_OWN_POINTER,ltogcat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}