/*$Id: isltog.c,v 1.46 2000/06/26 03:50:54 bsmith Exp bsmith $*/

#include "petscsys.h"   /*I "petscsys.h" I*/
#include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/

#undef __FUNC__
#define __FUNC__ /*<a name="ISLocalToGlobalMappingGetSize"></a>*/"ISLocalToGlobalMappingGetSize"
/*@C
   ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

   Not Collective

   Input Parameter:
.  ltog - local to global mapping

   Output Parameter:
.  n - the number of entries in the local mapping

   Level: advanced

.keywords: IS, local-to-global mapping, create

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
  *n = mapping->n;
  PetscFunctionReturn(0);
}

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingView"
/*@C
   ISLocalToGlobalMappingView - View a local to global mapping

   Not Collective

   Input Parameters:
+  ltog - local to global mapping
-  viewer - viewer

   Level: advanced

.keywords: IS, local-to-global mapping, create

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,Viewer viewer)
{
  int        i,ierr,rank;
  PetscTruth isascii;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
  if (!viewer) viewer = VIEWER_STDOUT_(mapping->comm);
  PetscValidHeaderSpecific(viewer,VIEWER_COOKIE);
  PetscCheckSameComm(mapping,viewer);

  ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,ASCII_VIEWER,&isascii);CHKERRQ(ierr);
  if (isascii) {
    for (i=0; i<mapping->n; i++) {
      ierr = ViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
    }
    ierr = ViewerFlush(viewer);CHKERRQ(ierr);
  } else {
    SETERRQ1(1,1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
  }

  PetscFunctionReturn(0);
}

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingCreateIS"
/*@C
   ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameter:
.  is - index set containing the global numbers for each local element

   Output Parameter:
.  mapping - new mapping data structure

   Level: advanced

.keywords: IS, local-to-global mapping, create

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
{
  int      n,*indices,ierr;
  MPI_Comm comm;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(is,IS_COOKIE);

  ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
  ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
  ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
  ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
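/*
   Illustrative sketch (not part of the library source): typical use of
   ISLocalToGlobalMappingCreateIS(). The communicator, object names and index
   values are made up for the example; error handling follows the CHKERRQ
   pattern used throughout this file. Since ISLocalToGlobalMappingCreate()
   copies the indices, the IS may be destroyed once the mapping exists.

     IS                     is;
     ISLocalToGlobalMapping ltog;
     int                    ierr,globals[] = {12,4,7,9};

     ierr = ISCreateGeneral(PETSC_COMM_WORLD,4,globals,&is);CHKERRQ(ierr);
     ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/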
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingCreate"
/*@C
   ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective, but communicator may have more than one process

   Input Parameters:
+  comm - MPI communicator
.  n - the number of local elements
-  indices - the global index for each local element

   Output Parameter:
.  mapping - new mapping data structure

   Level: advanced

.keywords: IS, local-to-global mapping, create

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
@*/
int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
{
  int ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices);
  PetscValidPointer(mapping);

  PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
  PLogObjectCreate(*mapping);
  PLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

  (*mapping)->n       = n;
  (*mapping)->indices = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ((*mapping)->indices);
  ierr = PetscMemcpy((*mapping)->indices,indices,n*sizeof(int));CHKERRQ(ierr);

  /*
     Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMappingApply() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingDestroy"
/*@
   ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
   ordering and a global parallel ordering.

   Not Collective

   Input Parameter:
.  mapping - mapping data structure

   Level: advanced

.keywords: IS, local-to-global mapping, destroy

.seealso: ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
{
  int ierr;

  PetscFunctionBegin;
  PetscValidPointer(mapping);
  if (--mapping->refct > 0) PetscFunctionReturn(0);
  if (mapping->refct < 0) {
    SETERRQ(1,1,"Mapping already destroyed");
  }

  ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
  if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
  PLogObjectDestroy(mapping);
  PetscHeaderDestroy(mapping);
  PetscFunctionReturn(0);
}
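/*
   Illustrative sketch (not part of the library source): creating a mapping
   directly from an array of global indices and destroying it when it is no
   longer needed. The names and values are made up for the example.

     ISLocalToGlobalMapping ltog;
     int                    ierr,globals[] = {3,7,2,9};

     ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,globals,&ltog);CHKERRQ(ierr);
     ...  use the mapping, e.g. with ISLocalToGlobalMappingApply()  ...
     ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
*/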
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingApplyIS"
/*@
   ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
   a new index set using the global numbering defined in an ISLocalToGlobalMapping
   context.

   Not Collective

   Input Parameters:
+  mapping - mapping between local and global numbering
-  is - index set in local numbering

   Output Parameter:
.  newis - index set in global numbering

   Level: advanced

.keywords: IS, local-to-global mapping, apply

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
@*/
int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
{
  int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;

  PetscFunctionBegin;
  PetscValidPointer(mapping);
  PetscValidHeaderSpecific(is,IS_COOKIE);
  PetscValidPointer(newis);

  ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
  ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
  idxmap = mapping->indices;

  idxout = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ(idxout);
  for (i=0; i<n; i++) {
    if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,1,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
    idxout[i] = idxmap[idxin[i]];
  }
  ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
  ierr = PetscFree(idxout);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
   and converts them to the global numbering.

   Not Collective

   Input Parameters:
+  mapping - the local to global mapping context
.  N - number of integers
-  in - input indices in local numbering

   Output Parameter:
.  out - indices in global numbering

   Synopsis:
   ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

   Notes:
   The in and out array parameters may be identical.

   Level: advanced

.seealso: ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingDestroy(),
          ISLocalToGlobalMappingApplyIS(), AOCreateBasic(), AOApplicationToPetsc(),
          AOPetscToApplication(), ISGlobalToLocalMappingApply()

.keywords: local-to-global, mapping, apply

M*/
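/*
   Illustrative sketch (not part of the library source): converting a few local
   indices to the global numbering, both with ISLocalToGlobalMappingApply()
   (documented above as a macro, so its result is not assigned to ierr here) and
   with ISLocalToGlobalMappingApplyIS(). It assumes ltog is a mapping created as
   shown earlier; the index values are made up.

     IS  islocal,isglobal;
     int localidx[] = {0,2,3},globalidx[3],ierr;

     ISLocalToGlobalMappingApply(ltog,3,localidx,globalidx);

     ierr = ISCreateGeneral(PETSC_COMM_SELF,3,localidx,&islocal);CHKERRQ(ierr);
     ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
     ierr = ISDestroy(islocal);CHKERRQ(ierr);
*/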
/* -----------------------------------------------------------------------------------------*/

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISGlobalToLocalMappingSetUp_Private"
/*
    Creates the global fields in the ISLocalToGlobalMapping structure
*/
static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
{
  int i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

  PetscFunctionBegin;
  end   = 0;
  start = 100000000;

  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue;
    if (idx[i] < start) start = idx[i];
    if (idx[i] > end)   end   = idx[i];
  }
  if (start > end) {start = 0; end = -1;}
  mapping->globalstart = start;
  mapping->globalend   = end;

  globals = mapping->globals = (int*)PetscMalloc((end-start+2)*sizeof(int));CHKPTRQ(mapping->globals);
  for (i=0; i<end-start+1; i++) {
    globals[i] = -1;
  }
  for (i=0; i<n; i++) {
    if (idx[i] < 0) continue;
    globals[idx[i] - start] = i;
  }

  PLogObjectMemory(mapping,(end-start+1)*sizeof(int));
  PetscFunctionReturn(0);
}

#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISGlobalToLocalMappingApply"
/*@
   ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
   specified with a global numbering.

   Not Collective

   Input Parameters:
+  mapping - mapping between local and global numbering
.  type - IS_GTOLM_MASK - replaces global indices with no local value with -1
          IS_GTOLM_DROP - drops the indices with no local value from the output list
.  n - number of global indices to map
-  idx - global indices to map

   Output Parameters:
+  nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
-  idxout - local index of each global index, one must pass in an array long enough
            to hold all the indices. You can call ISGlobalToLocalMappingApply() with
            idxout == PETSC_NULL to determine the required length (returned in nout)
            and then allocate the required space and call ISGlobalToLocalMappingApply()
            a second time to set the values.

   Notes:
   Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

   This is not scalable in memory usage. Each processor requires an O(Nglobal) size
   array to compute these.

   Level: advanced

.keywords: IS, global-to-local mapping, apply

.seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
          ISLocalToGlobalMappingDestroy()
@*/
int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
                                int n,const int idx[],int *nout,int idxout[])
{
  int i,ierr,*globals,nf = 0,tmp,start,end;

  PetscFunctionBegin;
  if (!mapping->globals) {
    ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
  }
  globals = mapping->globals;
  start   = mapping->globalstart;
  end     = mapping->globalend;

  if (type == IS_GTOLM_MASK) {
    if (idxout) {
      for (i=0; i<n; i++) {
        if      (idx[i] < 0)     idxout[i] = idx[i];
        else if (idx[i] < start) idxout[i] = -1;
        else if (idx[i] > end)   idxout[i] = -1;
        else                     idxout[i] = globals[idx[i] - start];
      }
    }
    if (nout) *nout = n;
  } else {
    if (idxout) {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        idxout[nf++] = tmp;
      }
    } else {
      for (i=0; i<n; i++) {
        if (idx[i] < 0) continue;
        if (idx[i] < start) continue;
        if (idx[i] > end) continue;
        tmp = globals[idx[i] - start];
        if (tmp < 0) continue;
        nf++;
      }
    }
    if (nout) *nout = nf;
  }

  PetscFunctionReturn(0);
}
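/*
   Illustrative sketch (not part of the library source): the two-pass pattern
   described in the manual page above for IS_GTOLM_DROP. The first call, with
   idxout == PETSC_NULL, only counts how many of the global indices have a local
   value; the second call fills the allocated array. It assumes ltog is a mapping
   created as shown earlier; the index values are made up.

     int nout,*local,globals[] = {17,92,41},ierr;

     ierr  = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,&nout,PETSC_NULL);CHKERRQ(ierr);
     local = (int*)PetscMalloc((nout+1)*sizeof(int));CHKPTRQ(local);
     ierr  = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,&nout,local);CHKERRQ(ierr);
     ...
     ierr  = PetscFree(local);CHKERRQ(ierr);
*/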
#undef __FUNC__
#define __FUNC__ /*<a name=""></a>*/"ISLocalToGlobalMappingGetInfo"
/*@C
   ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
   each index shared by more than one processor

   Collective on ISLocalToGlobalMapping

   Input Parameter:
.  mapping - the mapping from local to global indexing

   Output Parameters:
+  nproc - number of processors that are connected to this one
.  procs - neighboring processors
.  numprocs - number of indices shared with each neighboring processor
-  indices - indices of local nodes shared with neighbor (sorted by global numbering)

   Level: advanced

.keywords: IS, local-to-global mapping, neighbors

.seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
@*/
int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
{
  int         i,n = mapping->n,ierr,Ng,ng = PETSC_DECIDE,max = 0,*lindices = mapping->indices;
  int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
  int         tag,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs;
  MPI_Request *recv_waits,*send_waits;
  MPI_Status  recv_status,*send_status,*recv_statuses;
  MPI_Comm    comm = mapping->comm;

  PetscFunctionBegin;
  /*
     Notes on ISLocalToGlobalMappingGetInfo

     globally owned node - node that has been assigned to this processor in the global
            numbering, just for this routine.

     nontrivial globally owned node - node assigned to this processor that is on a subdomain
            boundary (i.e. it has more than one local owner)

     locally owned node - node that exists on this processor's subdomain

     nontrivial locally owned node - node that is not in the interior (i.e. it belongs to more
            than one local subdomain)
  */
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  nprocs = (int*)PetscMalloc((2*size+1)*sizeof(int));CHKPTRQ(nprocs);
  ierr   = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);

  /* determine owners of each local node */
  owner = (int*)PetscMalloc((n+1)*sizeof(int));CHKPTRQ(owner);
  for (i=0; i<n; i++) {
    proc              = lindices[i]/scale;   /* processor that globally owns this index */
    nprocs[size+proc] = 1;                   /* processor globally owns at least one of ours */
    owner[i]          = proc;
    nprocs[proc]++;                          /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
  PLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

  /* inform other processors of number of messages and max length*/
  work = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(work);
  ierr = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
  nmax   = work[rank];
  nrecvs = work[size+rank];
  ierr   = PetscFree(work);CHKERRQ(ierr);
  PLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

  /* post receives for owned rows */
  recvs      = (int*)PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int));CHKPTRQ(recvs);
  recv_waits = (MPI_Request*)PetscMalloc((nrecvs+1)*sizeof(MPI_Request));CHKPTRQ(recv_waits);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
  }
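  /*
     Added explanatory note: each process now sends, to the process that "globally owns"
     the block [rank*scale,(rank+1)*scale) of the global numbering, one pair of integers
     per local node falling in that block - the global index followed by the local index.
     The receives posted above therefore allow for up to nmax pairs (2*nmax integers)
     from each of the nrecvs senders.
  */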
  /* pack messages containing lists of local nodes to owners */
  sends     = (int*)PetscMalloc((2*n+1)*sizeof(int));CHKPTRQ(sends);
  starts    = (int*)PetscMalloc((size+1)*sizeof(int));CHKPTRQ(starts);
  starts[0] = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}

  /* send the messages */
  send_waits = (MPI_Request*)PetscMalloc((nsends+1)*sizeof(MPI_Request));CHKPTRQ(send_waits);
  dest       = (int*)PetscMalloc((nsends+1)*sizeof(int));CHKPTRQ(dest);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i]) {
      ierr = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives */
  source = (int*)PetscMalloc((2*nrecvs+1)*sizeof(int));CHKPTRQ(source);
  len    = source + nrecvs;
  cnt    = nrecvs;
  nownedsenders = (int*)PetscMalloc((ng+1)*sizeof(int));CHKPTRQ(nownedsenders);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(int));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex] = recv_status.MPI_SOURCE;
    len[imdex]    = len[imdex]/2;
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ownedsenders = (int*)PetscMalloc((nownedm+1)*sizeof(int));CHKPTRQ(ownedsenders);
  starts       = (int*)PetscMalloc((ng+1)*sizeof(int));CHKPTRQ(starts);
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrivial globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  /* ----------------------------------- */
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
      for (j=0; j<nownedsenders[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
  }
  ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  /* ----------------------------------- */

  /* wait on original sends */
  if (nsends) {
    send_status = (MPI_Status*)PetscMalloc(nsends*sizeof(MPI_Status));CHKPTRQ(send_status);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  nprocs  = (int*)PetscMalloc((nsends2+1)*sizeof(int));CHKPTRQ(nprocs); /* length of each message */
  cnt     = 0;
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  sends2  = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(sends2);
  starts2 = (int*)PetscMalloc((nsends2+1)*sizeof(int));CHKPTRQ(starts2);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is nprocs[i] integers long and consists of
       (0) the number of nontrivial nodes being sent back, followed, for each such node, by
       (1) the local node number,
       (2) the number of processors sharing it, and
       (3) the ranks of the processors sharing it
  */
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }
  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag-1,comm);CHKERRQ(ierr);
  }

  /* receive the message lengths */
  nrecvs2 = nsends;
  lens2   = (int*)PetscMalloc((nrecvs2+1)*sizeof(int));CHKPTRQ(lens2);
  starts3 = (int*)PetscMalloc((nrecvs2+1)*sizeof(int));CHKPTRQ(starts3);
  nt      = 0;
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag-1,comm,&recv_status);CHKERRQ(ierr);
    nt  += lens2[i];
  }
  starts3[0] = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
  }
  recvs2     = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(recvs2);
  recv_waits = (MPI_Request*)PetscMalloc((nrecvs2+1)*sizeof(MPI_Request));CHKPTRQ(recv_waits);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag-2,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the messages */
  send_waits = (MPI_Request*)PetscMalloc((nsends2+1)*sizeof(MPI_Request));CHKPTRQ(send_waits);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag-2,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  recv_statuses = (MPI_Status*)PetscMalloc((nrecvs2+1)*sizeof(MPI_Status));CHKPTRQ(recv_statuses);
  ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* ----------------------------------- */
  cnt = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
      for (k=0; k<recvs2[cnt+1]; k++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
      }
      cnt += 2 + recvs2[cnt+1];
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
  }
  ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  /* ----------------------------------- */

  /* count number of subdomains for each local node */
  nprocs = (int*)PetscMalloc(size*sizeof(int));CHKPTRQ(nprocs);
  ierr   = PetscMemzero(nprocs,size*sizeof(int));CHKERRQ(ierr);
  cnt    = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  *procs    = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(*procs);
  *numprocs = (int*)PetscMalloc((nt+1)*sizeof(int));CHKPTRQ(*numprocs);
  *indices  = (int**)PetscMalloc((nt+1)*sizeof(int*));CHKPTRQ(*indices);
  bprocs    = (int*)PetscMalloc(size*sizeof(int));CHKPTRQ(bprocs);
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt;
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      (*indices)[cnt]  = (int*)PetscMalloc(nprocs[i]*sizeof(int));CHKPTRQ((*indices)[cnt]);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  ierr = PetscMemzero(*numprocs,nt*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  /* ----------------------------------- */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
  }
  ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  /* ----------------------------------- */

  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* wait on sends */
  if (nsends2) {
    send_status = (MPI_Status*)PetscMalloc(nsends2*sizeof(MPI_Status));CHKPTRQ(send_status);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
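/*
   Illustrative sketch (not part of the library source): calling
   ISLocalToGlobalMappingGetInfo() and releasing the arrays it allocates. This
   release provides no matching restore routine, so freeing the output arrays
   with PetscFree() is assumed to be the caller's responsibility.

     int nproc,*procs,*numprocs,**indices,i,ierr;

     ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
     for (i=0; i<nproc; i++) {
       ierr = PetscFree(indices[i]);CHKERRQ(ierr);
     }
     ierr = PetscFree(indices);CHKERRQ(ierr);
     ierr = PetscFree(procs);CHKERRQ(ierr);
     ierr = PetscFree(numprocs);CHKERRQ(ierr);
*/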