xref: /petsc/src/vec/is/utils/isltog.c (revision 292f8084fb157dadf9a2ae26c5bd14368ed7ffcb)
1 
2 #include "petscsys.h"   /*I "petscsys.h" I*/
3 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
4 
5 EXTERN PetscErrorCode VecInitializePackage(char *);
6 PetscCookie IS_LTOGM_COOKIE = -1;
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
PetscErrorCode ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
{
  PetscFunctionBegin;
  /* validate arguments in declaration order (argument positions 1 and 2) */
  PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
  PetscValidIntPointer(n,2);
  *n = mapping->n; /* number of local entries stored at creation time */
  PetscFunctionReturn(0);
}
35 
36 #undef __FUNCT__
37 #define __FUNCT__ "ISLocalToGlobalMappingView"
38 /*@C
39     ISLocalToGlobalMappingView - View a local to global mapping
40 
41     Not Collective
42 
43     Input Parameters:
44 +   ltog - local to global mapping
45 -   viewer - viewer
46 
47     Level: advanced
48 
49     Concepts: mapping^local to global
50 
51 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
52 @*/
53 PetscErrorCode ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
54 {
55   PetscInt        i;
56   PetscMPIInt     rank;
57   PetscTruth      iascii;
58   PetscErrorCode  ierr;
59 
60   PetscFunctionBegin;
61   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE,1);
62   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
63   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE,2);
64 
65   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
66   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
67   if (iascii) {
68     for (i=0; i<mapping->n; i++) {
69       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
70     }
71     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
72   } else {
73     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
74   }
75 
76   PetscFunctionReturn(0);
77 }
78 
79 #undef __FUNCT__
80 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
81 /*@C
82     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
83     ordering and a global parallel ordering.
84 
85     Not collective
86 
87     Input Parameter:
88 .   is - index set containing the global numbers for each local
89 
90     Output Parameter:
91 .   mapping - new mapping data structure
92 
93     Level: advanced
94 
95     Concepts: mapping^local to global
96 
97 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
98 @*/
99 PetscErrorCode ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
100 {
101   PetscErrorCode ierr;
102   PetscInt      n,*indices;
103   MPI_Comm comm;
104 
105   PetscFunctionBegin;
106   PetscValidHeaderSpecific(is,IS_COOKIE,1);
107   PetscValidPointer(mapping,2);
108 
109   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
110   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
111   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
112   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
113   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
114 
115   PetscFunctionReturn(0);
116 }
117 
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
121 /*@C
122     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
123     ordering and a global parallel ordering.
124 
125     Not Collective, but communicator may have more than one process
126 
127     Input Parameters:
128 +   comm - MPI communicator
129 .   n - the number of local elements
130 -   indices - the global index for each local element
131 
132     Output Parameter:
133 .   mapping - new mapping data structure
134 
135     Level: advanced
136 
137     Concepts: mapping^local to global
138 
139 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
140 @*/
141 PetscErrorCode ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
142 {
143   PetscErrorCode ierr;
144   PetscInt *in;
145 
146   PetscFunctionBegin;
147   PetscValidIntPointer(indices,3);
148   PetscValidPointer(mapping,4);
149   ierr = PetscMalloc((n+1)*sizeof(PetscInt),&in);CHKERRQ(ierr);
150   ierr = PetscMemcpy(in,indices,n*sizeof(PetscInt));CHKERRQ(ierr);
151   ierr = ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);CHKERRQ(ierr);
152   PetscFunctionReturn(0);
153 }
154 
155 #undef __FUNCT__
156 #define __FUNCT__ "ISLocalToGlobalMappingCreateNC"
157 /*@C
158     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
159     ordering and a global parallel ordering.
160 
161     Not Collective, but communicator may have more than one process
162 
163     Input Parameters:
164 +   comm - MPI communicator
165 .   n - the number of local elements
166 -   indices - the global index for each local element
167 
168     Output Parameter:
169 .   mapping - new mapping data structure
170 
171     Level: developer
172 
173     Notes: Does not copy the indices, just keeps the pointer to the indices. The ISLocalToGlobalMappingDestroy()
174     will free the space so it must be obtained with PetscMalloc() and it must not be freed elsewhere.
175 
176     Concepts: mapping^local to global
177 
178 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
179 @*/
/* Takes ownership of 'indices' (must come from PetscMalloc(); freed by
   ISLocalToGlobalMappingDestroy()) — see the manual page Notes above. */
PetscErrorCode ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidIntPointer(indices,3);
  PetscValidPointer(mapping,4);
  *mapping = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  /* with static libraries the Vec package (which owns this class) may not
     have been initialized yet; this call is idempotent */
  ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);
#endif
  /* lazily register the logging class the first time a mapping is created */
  if (IS_LTOGM_COOKIE == -1) {
    ierr = PetscLogClassRegister(&IS_LTOGM_COOKIE,"IS Local to global mapping");CHKERRQ(ierr);
  }

  /* allocate the header and install the destroy/view methods */
  PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
                    cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
  PetscLogObjectCreate(*mapping);
  PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(PetscInt));

  (*mapping)->n       = n;
  /* keep (do not copy) the caller-supplied array; cast away const since the
     struct field is non-const */
  (*mapping)->indices = (PetscInt*)indices;

  /*
      Do not create the global to local mapping. This is only created if
     ISGlobalToLocalMapping() is called
  */
  (*mapping)->globals = 0;
  PetscFunctionReturn(0);
}
210 
211 #undef __FUNCT__
212 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
213 /*@C
214     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
215        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
216        and VecSetLocalToGlobalMappingBlock().
217 
218     Not Collective, but communicator may have more than one process
219 
220     Input Parameters:
221 +    inmap - original point-wise mapping
222 -    bs - block size
223 
224     Output Parameter:
225 .   outmap - block based mapping
226 
227     Level: advanced
228 
229     Concepts: mapping^local to global
230 
231 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
232 @*/
233 PetscErrorCode ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
234 {
235   PetscErrorCode ierr;
236   PetscInt            *ii,i,n;
237 
238   PetscFunctionBegin;
239   if (bs > 1) {
240     n    = inmap->n/bs;
241     ierr = PetscMalloc(n*sizeof(PetscInt),&ii);CHKERRQ(ierr);
242     for (i=0; i<n; i++) {
243       ii[i] = inmap->indices[bs*i]/bs;
244     }
245     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
246     ierr = PetscFree(ii);CHKERRQ(ierr);
247   } else {
248     *outmap = inmap;
249     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
250   }
251   PetscFunctionReturn(0);
252 }
253 
254 #undef __FUNCT__
255 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
256 /*@
257    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
258    ordering and a global parallel ordering.
259 
260    Not Collective
261 
262    Input Parameters:
263 .  mapping - mapping data structure
264 
265    Level: advanced
266 
267 .seealso: ISLocalToGlobalMappingCreate()
268 @*/
269 PetscErrorCode ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
270 {
271   PetscErrorCode ierr;
272   PetscFunctionBegin;
273   PetscValidPointer(mapping,1);
274   if (--mapping->refct > 0) PetscFunctionReturn(0);
275   if (mapping->refct < 0) {
276     SETERRQ(PETSC_ERR_PLIB,"Mapping already destroyed");
277   }
278 
279   ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
280   if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
281   PetscLogObjectDestroy(mapping);
282   PetscHeaderDestroy(mapping);
283   PetscFunctionReturn(0);
284 }
285 
286 #undef __FUNCT__
287 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
288 /*@
289     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
290     a new index set using the global numbering defined in an ISLocalToGlobalMapping
291     context.
292 
293     Not collective
294 
295     Input Parameters:
296 +   mapping - mapping between local and global numbering
297 -   is - index set in local numbering
298 
299     Output Parameters:
300 .   newis - index set in global numbering
301 
302     Level: advanced
303 
304     Concepts: mapping^local to global
305 
306 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
307           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
308 @*/
309 PetscErrorCode ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
310 {
311   PetscErrorCode ierr;
312   PetscInt            n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
313 
314   PetscFunctionBegin;
315   PetscValidPointer(mapping,1);
316   PetscValidHeaderSpecific(is,IS_COOKIE,2);
317   PetscValidPointer(newis,3);
318 
319   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
320   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
321   idxmap = mapping->indices;
322 
323   ierr = PetscMalloc((n+1)*sizeof(PetscInt),&idxout);CHKERRQ(ierr);
324   for (i=0; i<n; i++) {
325     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
326     idxout[i] = idxmap[idxin[i]];
327   }
328   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
329   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
330   ierr = PetscFree(idxout);CHKERRQ(ierr);
331   PetscFunctionReturn(0);
332 }
333 
334 /*MC
335    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
336    and converts them to the global numbering.
337 
338    Not collective
339 
340    Input Parameters:
341 +  mapping - the local to global mapping context
342 .  N - number of integers
343 -  in - input indices in local numbering
344 
345    Output Parameter:
346 .  out - indices in global numbering
347 
348    Synopsis:
349    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,PetscInt N,PetscInt in[],PetscInt out[])
350 
351    Notes:
352    The in and out array parameters may be identical.
353 
354    Level: advanced
355 
356 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
357           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
358           AOPetscToApplication(), ISGlobalToLocalMappingApply()
359 
360     Concepts: mapping^local to global
361 
362 M*/
363 
364 /* -----------------------------------------------------------------------------------------*/
365 
366 #undef __FUNCT__
367 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
368 /*
369     Creates the global fields in the ISLocalToGlobalMapping structure
370 */
371 static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
372 {
373   PetscErrorCode ierr;
374   PetscInt            i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
375 
376   PetscFunctionBegin;
377   end   = 0;
378   start = 100000000;
379 
380   for (i=0; i<n; i++) {
381     if (idx[i] < 0) continue;
382     if (idx[i] < start) start = idx[i];
383     if (idx[i] > end)   end   = idx[i];
384   }
385   if (start > end) {start = 0; end = -1;}
386   mapping->globalstart = start;
387   mapping->globalend   = end;
388 
389   ierr             = PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);CHKERRQ(ierr);
390   mapping->globals = globals;
391   for (i=0; i<end-start+1; i++) {
392     globals[i] = -1;
393   }
394   for (i=0; i<n; i++) {
395     if (idx[i] < 0) continue;
396     globals[idx[i] - start] = i;
397   }
398 
399   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
400   PetscFunctionReturn(0);
401 }
402 
403 #undef __FUNCT__
404 #define __FUNCT__ "ISGlobalToLocalMappingApply"
405 /*@
406     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
407     specified with a global numbering.
408 
409     Not collective
410 
411     Input Parameters:
412 +   mapping - mapping between local and global numbering
413 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
414            IS_GTOLM_DROP - drops the indices with no local value from the output list
415 .   n - number of global indices to map
416 -   idx - global indices to map
417 
418     Output Parameters:
419 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
420 -   idxout - local index of each global index, one must pass in an array long enough
421              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
422              idxout == PETSC_NULL to determine the required length (returned in nout)
423              and then allocate the required space and call ISGlobalToLocalMappingApply()
424              a second time to set the values.
425 
426     Notes:
427     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
428 
429     This is not scalable in memory usage. Each processor requires O(Nglobal) size
430     array to compute these.
431 
432     Level: advanced
433 
434     Concepts: mapping^global to local
435 
436 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
437           ISLocalToGlobalMappingDestroy()
438 @*/
439 PetscErrorCode ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
440                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
441 {
442   PetscInt i,*globals,nf = 0,tmp,start,end;
443   PetscErrorCode ierr;
444 
445   PetscFunctionBegin;
446   if (!mapping->globals) {
447     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
448   }
449   globals = mapping->globals;
450   start   = mapping->globalstart;
451   end     = mapping->globalend;
452 
453   if (type == IS_GTOLM_MASK) {
454     if (idxout) {
455       for (i=0; i<n; i++) {
456         if (idx[i] < 0) idxout[i] = idx[i];
457         else if (idx[i] < start) idxout[i] = -1;
458         else if (idx[i] > end)   idxout[i] = -1;
459         else                     idxout[i] = globals[idx[i] - start];
460       }
461     }
462     if (nout) *nout = n;
463   } else {
464     if (idxout) {
465       for (i=0; i<n; i++) {
466         if (idx[i] < 0) continue;
467         if (idx[i] < start) continue;
468         if (idx[i] > end) continue;
469         tmp = globals[idx[i] - start];
470         if (tmp < 0) continue;
471         idxout[nf++] = tmp;
472       }
473     } else {
474       for (i=0; i<n; i++) {
475         if (idx[i] < 0) continue;
476         if (idx[i] < start) continue;
477         if (idx[i] > end) continue;
478         tmp = globals[idx[i] - start];
479         if (tmp < 0) continue;
480         nf++;
481       }
482     }
483     if (nout) *nout = nf;
484   }
485 
486   PetscFunctionReturn(0);
487 }
488 
489 #undef __FUNCT__
490 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
491 /*@C
492     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
493      each index shared by more than one processor
494 
495     Collective on ISLocalToGlobalMapping
496 
497     Input Parameters:
498 .   mapping - the mapping from local to global indexing
499 
500     Output Parameter:
501 +   nproc - number of processors that are connected to this one
502 .   proc - neighboring processors
503 .   numproc - number of indices for each subdomain (processor)
504 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
505 
506     Level: advanced
507 
508     Concepts: mapping^local to global
509 
510 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
511           ISLocalToGlobalMappingRestoreInfo()
512 @*/
/*
   Rendezvous algorithm: each global index is assigned a "global owner" rank
   (index/scale); ranks exchange their local index lists with the global
   owners (tag1), the owners reply with the set of ranks sharing each index
   (tag2 for lengths, tag3 for payloads), and the replies are regrouped per
   neighboring subdomain.  Statement order and buffer layouts are tightly
   coupled; do not reorder.
*/
PetscErrorCode ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
{
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,tag3;
  PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
  PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
  PetscInt       cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
  PetscInt       first_procs,first_numprocs,*first_indices;
  MPI_Request    *recv_waits,*send_waits;
  MPI_Status     recv_status,*send_status,*recv_statuses;
  MPI_Comm       comm = mapping->comm;
  PetscTruth     debug = PETSC_FALSE;

  PetscFunctionBegin;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (size == 1) {
    /* uniprocessor case: no neighbors; allocate the one-element arrays the
       caller will pass to ISLocalToGlobalMappingRestoreInfo() */
    *nproc         = 0;
    *procs         = PETSC_NULL;
    ierr           = PetscMalloc(sizeof(PetscInt),numprocs);CHKERRQ(ierr);
    (*numprocs)[0] = 0;
    ierr           = PetscMalloc(sizeof(PetscInt*),indices);CHKERRQ(ierr);
    (*indices)[0]  = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);

  /*
    Notes on ISLocalToGlobalMappingGetInfo

    globally owned node - the nodes that have been assigned to this processor in global
           numbering, just for this routine.

    nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. it has more than one local owner)

    locally owned node - node that exists on this processors subdomain

    nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain)
  */
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);

  /* Ng = 1 + largest global index anywhere; scale partitions [0,Ng) into
     near-equal ownership ranges of size 'scale' per rank */
  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
  Ng++;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  /* nprocs layout: nprocs[2*p] = count destined to rank p, nprocs[2*p+1] = 0/1 flag */
  ierr = PetscMalloc((2*size+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);

  /* determine owners of each local node  */
  ierr = PetscMalloc((n+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    proc             = lindices[i]/scale; /* processor that globally owns this index */
    nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
    owner[i]         = proc;
    nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

  /* inform other processors of number of messages and max length*/
  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

  /* post receives for owned rows */
  /* each incoming message is at most 2*nmax ints: (global index, sender-local index) pairs */
  ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* pack messages containing lists of local nodes to owners */
  ierr       = PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);CHKERRQ(ierr);
  ierr       = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  /* packing advanced starts[]; rebuild it so the send loop below has the
     correct message offsets */
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

  /* send the messages */
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);CHKERRQ(ierr);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[2*i]) {
      ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives */
  /* source[i] = sender rank of message i; len[i] = number of (index,local) pairs in it */
  ierr = PetscMalloc((2*nrecvs+1)*sizeof(PetscInt),&source);CHKERRQ(ierr);
  len  = source + nrecvs;
  cnt  = nrecvs;
  ierr = PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);CHKERRQ(ierr);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(PetscInt));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr           = MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex]  = recv_status.MPI_SOURCE;
    len[imdex]     = len[imdex]/2;
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ierr      = PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);CHKERRQ(ierr);
  ierr      = PetscMalloc((ng+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrivial globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  if (debug) { /* -----------------------------------  */
    /* the loop above advanced starts[]; rebuild before printing */
    starts[0]    = 0;
    for (i=1; i<ng; i++) {
      if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
      else starts[i] = starts[i-1];
    }
    for (i=0; i<ng; i++) {
      if (nownedsenders[i] > 1) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
        for (j=0; j<nownedsenders[i]; j++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }/* -----------------------------------  */

  /* wait on original sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  ierr    = PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); /* length of each message */
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);CHKERRQ(ierr);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is 1 + nprocs[i] long, and consists of
       (0) the number of nodes being sent back
       (1) the local node number,
       (2) the number of processors sharing it,
       (3) the processors sharing it
  */
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }

  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);CHKERRQ(ierr);
  }

  /* receive the message lengths */
  nrecvs2 = nsends;
  ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);CHKERRQ(ierr);
  nt      = 0;
  for (i=0; i<nrecvs2; i++) {
    ierr =  MPI_Recv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,&recv_status);CHKERRQ(ierr);
    nt   += lens2[i];
  }
  starts3[0] = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
  }
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the messages */
  ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  if (debug) { /* -----------------------------------  */
    cnt = 0;
    for (i=0; i<nrecvs2; i++) {
      nt = recvs2[cnt++];
      for (j=0; j<nt; j++) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
        for (k=0; k<recvs2[cnt+1]; k++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
        }
        cnt += 2 + recvs2[cnt+1];
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* count number subdomains for each local node */
  ierr = PetscMalloc(size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,size*sizeof(PetscInt));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),procs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(PetscInt*),indices);CHKERRQ(ierr);
  /* bprocs[p] = compressed position of rank p in the output arrays */
  ierr = PetscMalloc(size*sizeof(PetscInt),&bprocs);CHKERRQ(ierr);
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt;
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      ierr             = PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);CHKERRQ(ierr);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  /* (*numprocs) is reused here as a per-subdomain fill cursor, then ends up
     back at its final value once all entries are placed */
  ierr = PetscMemzero(*numprocs,nt*sizeof(PetscInt));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* sort the node indexing by their global numbers */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      tmp[j] = lindices[(*indices)[i][j]];
    }
    ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
    ierr = PetscFree(tmp);CHKERRQ(ierr);
  }

  if (debug) { /* -----------------------------------  */
    nt = *nproc;
    for (i=0; i<nt; i++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
      for (j=0; j<(*numprocs)[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* wait on sends */
  if (nsends2) {
    ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);

  /* put the information about myself as the first entry in the list */
  first_procs    = (*procs)[0];
  first_numprocs = (*numprocs)[0];
  first_indices  = (*indices)[0];
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }

  PetscFunctionReturn(0);
}
899 
900 #undef __FUNCT__
901 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
902 /*@C
903     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
904 
905     Collective on ISLocalToGlobalMapping
906 
907     Input Parameters:
908 .   mapping - the mapping from local to global indexing
909 
910     Output Parameter:
911 +   nproc - number of processors that are connected to this one
912 .   proc - neighboring processors
913 .   numproc - number of indices for each processor
914 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
915 
916     Level: advanced
917 
918 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
919           ISLocalToGlobalMappingGetInfo()
920 @*/
PetscErrorCode ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
{
  PetscErrorCode ierr;
  PetscInt i;

  PetscFunctionBegin;
  /* free whatever ISLocalToGlobalMappingGetInfo() allocated; on the size==1
     path *procs is NULL while *numprocs and *indices are one-element arrays */
  if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
  if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
  if (*indices) {
    /* entry 0 is freed outside the loop because it exists even when
       *nproc == 0 (the uniprocessor case allocates (*indices)[0] = NULL) */
    if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
    for (i=1; i<*nproc; i++) {
      if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
    }
    ierr = PetscFree(*indices);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
938 
939 
940 
941 
942 
943 
944 
945 
946 
947 
948 
949 
950 
951 
952 
953 
954 
955 
956