xref: /petsc/src/vec/is/utils/isltog.c (revision f6cc79bd0334691d264e95eacfaee85c3da0d04a)
1 /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/
2 
3 #include "petscsys.h"   /*I "petscsys.h" I*/
4 #include "src/vec/is/isimpl.h"    /*I "petscis.h"  I*/
5 
6 EXTERN int VecInitializePackage(char *);
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "ISLocalToGlobalMappingGetSize"
10 /*@C
11     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.
12 
13     Not Collective
14 
15     Input Parameter:
16 .   ltog - local to global mapping
17 
18     Output Parameter:
19 .   n - the number of entries in the local mapping
20 
21     Level: advanced
22 
23     Concepts: mapping^local to global
24 
25 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
26 @*/
27 int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
28 {
29   PetscFunctionBegin;
30   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
31   *n = mapping->n;
32   PetscFunctionReturn(0);
33 }
34 
35 #undef __FUNCT__
36 #define __FUNCT__ "ISLocalToGlobalMappingView"
37 /*@C
38     ISLocalToGlobalMappingView - View a local to global mapping
39 
40     Not Collective
41 
42     Input Parameters:
43 +   ltog - local to global mapping
-   viewer - viewer used to display the mapping; PETSC_NULL selects the standard output viewer
45 
46     Level: advanced
47 
48     Concepts: mapping^local to global
49 
50 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
51 @*/
52 int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
53 {
54   int        i,ierr,rank;
55   PetscTruth isascii;
56 
57   PetscFunctionBegin;
58   PetscValidHeaderSpecific(mapping,IS_LTOGM_COOKIE);
59   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);
60   PetscValidHeaderSpecific(viewer,PETSC_VIEWER_COOKIE);
61 
62   ierr = MPI_Comm_rank(mapping->comm,&rank);CHKERRQ(ierr);
63   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
64   if (isascii) {
65     for (i=0; i<mapping->n; i++) {
66       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);CHKERRQ(ierr);
67     }
68     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
69   } else {
70     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
71   }
72 
73   PetscFunctionReturn(0);
74 }
75 
76 #undef __FUNCT__
77 #define __FUNCT__ "ISLocalToGlobalMappingCreateIS"
78 /*@C
79     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
80     ordering and a global parallel ordering.
81 
82     Not collective
83 
84     Input Parameter:
.   is - index set containing the global number for each local index
86 
87     Output Parameter:
88 .   mapping - new mapping data structure
89 
90     Level: advanced
91 
92     Concepts: mapping^local to global
93 
94 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
95 @*/
96 int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
97 {
98   int      n,*indices,ierr;
99   MPI_Comm comm;
100 
101   PetscFunctionBegin;
102   PetscValidHeaderSpecific(is,IS_COOKIE);
103 
104   ierr = PetscObjectGetComm((PetscObject)is,&comm);CHKERRQ(ierr);
105   ierr = ISGetLocalSize(is,&n);CHKERRQ(ierr);
106   ierr = ISGetIndices(is,&indices);CHKERRQ(ierr);
107   ierr = ISLocalToGlobalMappingCreate(comm,n,indices,mapping);CHKERRQ(ierr);
108   ierr = ISRestoreIndices(is,&indices);CHKERRQ(ierr);
109 
110   PetscFunctionReturn(0);
111 }
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "ISLocalToGlobalMappingCreate"
115 /*@C
116     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
117     ordering and a global parallel ordering.
118 
119     Not Collective, but communicator may have more than one process
120 
121     Input Parameters:
122 +   comm - MPI communicator
123 .   n - the number of local elements
124 -   indices - the global index for each local element
125 
126     Output Parameter:
127 .   mapping - new mapping data structure
128 
129     Level: advanced
130 
131     Concepts: mapping^local to global
132 
133 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS()
134 @*/
135 int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
136 {
137   int ierr;
138 
139   PetscFunctionBegin;
140   PetscValidIntPointer(indices);
141   PetscValidPointer(mapping);
142   *mapping = PETSC_NULL;
143 #ifndef PETSC_USE_DYNAMIC_LIBRARIES
144   ierr = VecInitializePackage(PETSC_NULL);                                                                CHKERRQ(ierr);
145 #endif
146 
147   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
148                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
149   PetscLogObjectCreate(*mapping);
150   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));
151 
152   (*mapping)->n       = n;
153   ierr = PetscMalloc((n+1)*sizeof(int),&(*mapping)->indices);CHKERRQ(ierr);
154   ierr = PetscMemcpy((*mapping)->indices,indices,n*sizeof(int));CHKERRQ(ierr);
155 
156   /*
157       Do not create the global to local mapping. This is only created if
158      ISGlobalToLocalMapping() is called
159   */
160   (*mapping)->globals = 0;
161   PetscFunctionReturn(0);
162 }
163 
164 #undef __FUNCT__
165 #define __FUNCT__ "ISLocalToGlobalMappingBlock"
166 /*@C
167     ISLocalToGlobalMappingBlock - Creates a blocked index version of an
168        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
169        and VecSetLocalToGlobalMappingBlock().
170 
171     Not Collective, but communicator may have more than one process
172 
173     Input Parameters:
174 +    inmap - original point-wise mapping
175 -    bs - block size
176 
177     Output Parameter:
178 .   outmap - block based mapping
179 
180     Level: advanced
181 
182     Concepts: mapping^local to global
183 
184 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
185 @*/
186 int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
187 {
188   int ierr,*ii,i,n;
189 
190   PetscFunctionBegin;
191 
192   if (bs > 1) {
193     n    = inmap->n/bs;
194     ierr = PetscMalloc(n*sizeof(int),&ii);CHKERRQ(ierr);
195     for (i=0; i<n; i++) {
196       ii[i] = inmap->indices[bs*i]/bs;
197     }
198     ierr = ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);CHKERRQ(ierr);
199     ierr = PetscFree(ii);CHKERRQ(ierr);
200   } else {
201     *outmap = inmap;
202     ierr    = PetscObjectReference((PetscObject)inmap);CHKERRQ(ierr);
203   }
204   PetscFunctionReturn(0);
205 }
206 
207 #undef __FUNCT__
208 #define __FUNCT__ "ISLocalToGlobalMappingDestroy"
209 /*@
210    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
211    ordering and a global parallel ordering.
212 
   Not Collective
214 
215    Input Parameters:
216 .  mapping - mapping data structure
217 
218    Level: advanced
219 
220 .seealso: ISLocalToGlobalMappingCreate()
221 @*/
222 int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
223 {
224   int ierr;
225   PetscFunctionBegin;
226   PetscValidPointer(mapping);
227   if (--mapping->refct > 0) PetscFunctionReturn(0);
228   if (mapping->refct < 0) {
229     SETERRQ(1,"Mapping already destroyed");
230   }
231 
232   ierr = PetscFree(mapping->indices);CHKERRQ(ierr);
233   if (mapping->globals) {ierr = PetscFree(mapping->globals);CHKERRQ(ierr);}
234   PetscLogObjectDestroy(mapping);
235   PetscHeaderDestroy(mapping);
236   PetscFunctionReturn(0);
237 }
238 
239 #undef __FUNCT__
240 #define __FUNCT__ "ISLocalToGlobalMappingApplyIS"
241 /*@
242     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
243     a new index set using the global numbering defined in an ISLocalToGlobalMapping
244     context.
245 
246     Not collective
247 
248     Input Parameters:
249 +   mapping - mapping between local and global numbering
250 -   is - index set in local numbering
251 
252     Output Parameters:
253 .   newis - index set in global numbering
254 
255     Level: advanced
256 
257     Concepts: mapping^local to global
258 
259 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
260           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
261 @*/
262 int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
263 {
264   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;
265 
266   PetscFunctionBegin;
267   PetscValidPointer(mapping);
268   PetscValidHeaderSpecific(is,IS_COOKIE);
269   PetscValidPointer(newis);
270 
271   ierr   = ISGetLocalSize(is,&n);CHKERRQ(ierr);
272   ierr   = ISGetIndices(is,&idxin);CHKERRQ(ierr);
273   idxmap = mapping->indices;
274 
275   ierr = PetscMalloc((n+1)*sizeof(int),&idxout);CHKERRQ(ierr);
276   for (i=0; i<n; i++) {
277     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
278     idxout[i] = idxmap[idxin[i]];
279   }
280   ierr = ISRestoreIndices(is,&idxin);CHKERRQ(ierr);
281   ierr = ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);CHKERRQ(ierr);
282   ierr = PetscFree(idxout);CHKERRQ(ierr);
283   PetscFunctionReturn(0);
284 }
285 
286 /*MC
287    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
288    and converts them to the global numbering.
289 
290    Not collective
291 
292    Input Parameters:
293 +  mapping - the local to global mapping context
294 .  N - number of integers
295 -  in - input indices in local numbering
296 
297    Output Parameter:
298 .  out - indices in global numbering
299 
300    Synopsis:
301    int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])
302 
303    Notes:
304    The in and out array parameters may be identical.
305 
306    Level: advanced
307 
308 .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(),
309           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
310           AOPetscToApplication(), ISGlobalToLocalMappingApply()
311 
312     Concepts: mapping^local to global
313 
314 M*/
315 
316 /* -----------------------------------------------------------------------------------------*/
317 
318 #undef __FUNCT__
319 #define __FUNCT__ "ISGlobalToLocalMappingSetUp_Private"
320 /*
321     Creates the global fields in the ISLocalToGlobalMapping structure
322 */
323 static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
324 {
325   int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;
326 
327   PetscFunctionBegin;
328   end   = 0;
329   start = 100000000;
330 
331   for (i=0; i<n; i++) {
332     if (idx[i] < 0) continue;
333     if (idx[i] < start) start = idx[i];
334     if (idx[i] > end)   end   = idx[i];
335   }
336   if (start > end) {start = 0; end = -1;}
337   mapping->globalstart = start;
338   mapping->globalend   = end;
339 
340   ierr             = PetscMalloc((end-start+2)*sizeof(int),&globals);CHKERRQ(ierr);
341   mapping->globals = globals;
342   for (i=0; i<end-start+1; i++) {
343     globals[i] = -1;
344   }
345   for (i=0; i<n; i++) {
346     if (idx[i] < 0) continue;
347     globals[idx[i] - start] = i;
348   }
349 
350   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
351   PetscFunctionReturn(0);
352 }
353 
354 #undef __FUNCT__
355 #define __FUNCT__ "ISGlobalToLocalMappingApply"
356 /*@
357     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
358     specified with a global numbering.
359 
360     Not collective
361 
362     Input Parameters:
363 +   mapping - mapping between local and global numbering
364 .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
365            IS_GTOLM_DROP - drops the indices with no local value from the output list
366 .   n - number of global indices to map
367 -   idx - global indices to map
368 
369     Output Parameters:
370 +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
371 -   idxout - local index of each global index, one must pass in an array long enough
372              to hold all the indices. You can call ISGlobalToLocalMappingApply() with
373              idxout == PETSC_NULL to determine the required length (returned in nout)
374              and then allocate the required space and call ISGlobalToLocalMappingApply()
375              a second time to set the values.
376 
377     Notes:
378     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.
379 
380     This is not scalable in memory usage. Each processor requires O(Nglobal) size
381     array to compute these.
382 
383     Level: advanced
384 
385     Concepts: mapping^global to local
386 
387 .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
388           ISLocalToGlobalMappingDestroy()
389 @*/
390 int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
391                                   int n,const int idx[],int *nout,int idxout[])
392 {
393   int i,ierr,*globals,nf = 0,tmp,start,end;
394 
395   PetscFunctionBegin;
396   if (!mapping->globals) {
397     ierr = ISGlobalToLocalMappingSetUp_Private(mapping);CHKERRQ(ierr);
398   }
399   globals = mapping->globals;
400   start   = mapping->globalstart;
401   end     = mapping->globalend;
402 
403   if (type == IS_GTOLM_MASK) {
404     if (idxout) {
405       for (i=0; i<n; i++) {
406         if (idx[i] < 0) idxout[i] = idx[i];
407         else if (idx[i] < start) idxout[i] = -1;
408         else if (idx[i] > end)   idxout[i] = -1;
409         else                     idxout[i] = globals[idx[i] - start];
410       }
411     }
412     if (nout) *nout = n;
413   } else {
414     if (idxout) {
415       for (i=0; i<n; i++) {
416         if (idx[i] < 0) continue;
417         if (idx[i] < start) continue;
418         if (idx[i] > end) continue;
419         tmp = globals[idx[i] - start];
420         if (tmp < 0) continue;
421         idxout[nf++] = tmp;
422       }
423     } else {
424       for (i=0; i<n; i++) {
425         if (idx[i] < 0) continue;
426         if (idx[i] < start) continue;
427         if (idx[i] > end) continue;
428         tmp = globals[idx[i] - start];
429         if (tmp < 0) continue;
430         nf++;
431       }
432     }
433     if (nout) *nout = nf;
434   }
435 
436   PetscFunctionReturn(0);
437 }
438 
439 #undef __FUNCT__
440 #define __FUNCT__ "ISLocalToGlobalMappingGetInfo"
441 /*@C
442     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and
443      each index shared by more than one processor
444 
445     Collective on ISLocalToGlobalMapping
446 
447     Input Parameters:
448 .   mapping - the mapping from local to global indexing
449 
450     Output Parameter:
451 +   nproc - number of processors that are connected to this one
452 .   proc - neighboring processors
453 .   numproc - number of indices for each subdomain (processor)
454 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
455 
456     Level: advanced
457 
458     Concepts: mapping^local to global
459 
460 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
461           ISLocalToGlobalMappingRestoreInfo()
462 @*/
int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
{
  int         i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
  int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,*work,nmax,nrecvs,*recvs,proc;
  int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
  int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
  int         first_procs,first_numprocs,*first_indices;
  MPI_Request *recv_waits,*send_waits;
  MPI_Status  recv_status,*send_status,*recv_statuses;
  MPI_Comm    comm = mapping->comm;
  PetscTruth  debug = PETSC_FALSE;

  PetscFunctionBegin;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (size == 1) {
    /* uniprocessor: no neighbors; return empty but freeable arrays so that
       ISLocalToGlobalMappingRestoreInfo() works uniformly */
    *nproc         = 0;
    *procs         = PETSC_NULL;
    ierr           = PetscMalloc(sizeof(int),numprocs);CHKERRQ(ierr);
    (*numprocs)[0] = 0;
    ierr           = PetscMalloc(sizeof(int*),indices);CHKERRQ(ierr);
    (*indices)[0]  = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);CHKERRQ(ierr);

  /*
    Notes on ISLocalToGlobalMappingGetInfo

    globally owned node - the nodes that have been assigned to this processor in global
           numbering, just for this routine.

    nontrivial globally owned node - node assigned to this processor that is on a subdomain
           boundary (i.e. it has more than one local owner)

    locally owned node - node that exists on this processors subdomain

    nontrivial locally owned node - node that is not in the interior (i.e. has more than one
           local subdomain)
  */
  /* one tag per communication round: index lists, reply lengths, replies */
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)mapping,&tag3);CHKERRQ(ierr);

  /* Ng = (global maximum index) + 1 = size of the global index space */
  for (i=0; i<n; i++) {
    if (lindices[i] > max) max = lindices[i];
  }
  ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  Ng++;
  ierr   = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr   = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  /* block-partition the global index space: this rank globally owns
     [rstart, rstart+ng) where each block is "scale" wide */
  scale  = Ng/size + 1;
  ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
  rstart = scale*rank;

  /* determine ownership ranges of global indices */
  ierr = PetscMalloc((2*size+1)*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);

  /* determine owners of each local node; the first half of nprocs[] counts
     entries per owner, the second half flags owners we must message */
  ierr = PetscMalloc((n+1)*sizeof(int),&owner);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    proc              = lindices[i]/scale; /* processor that globally owns this index */
    nprocs[size+proc] = 1;                 /* processor globally owns at least one of ours */
    owner[i]          = proc;
    nprocs[proc]++;                        /* count of how many that processor globally owns of ours */
  }
  nsends = 0; for (i=0; i<size; i++) nsends += nprocs[size + i];
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

  /* inform other processors of number of messages and max length */
  /* NOTE(review): PetscMaxSum_Op appears to reduce the first half of the
     array with max and the second with sum, giving nmax = longest incoming
     message (in pairs) and nrecvs = number of incoming messages -- confirm
     against its definition */
  ierr = PetscMalloc(2*size*sizeof(int),&work);CHKERRQ(ierr);
  ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
  nmax   = work[rank];
  nrecvs = work[size+rank];
  ierr   = PetscFree(work);CHKERRQ(ierr);
  PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

  /* post receives for owned rows; each slot holds up to nmax (global,local) pairs */
  ierr = PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* pack messages containing lists of local nodes to owners;
     entries are (global index, local index) pairs grouped by owner */
  ierr       = PetscMalloc((2*n+1)*sizeof(int),&sends);CHKERRQ(ierr);
  ierr       = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}
  for (i=0; i<n; i++) {
    sends[starts[owner[i]]++] = lindices[i];
    sends[starts[owner[i]]++] = i;
  }
  ierr = PetscFree(owner);CHKERRQ(ierr);
  /* rebuild starts[]; the packing loop above advanced the offsets */
  starts[0]  = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[i-1];}

  /* send the messages; dest[] remembers which rank each send went to,
     needed to match the reply traffic in round two */
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(int),&dest);CHKERRQ(ierr);
  cnt = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i]) {
      ierr      = MPI_Isend(sends+starts[i],2*nprocs[i],MPI_INT,i,tag1,comm,send_waits+cnt);CHKERRQ(ierr);
      dest[cnt] = i;
      cnt++;
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  /* wait on receives; source[] and len[] share one allocation */
  ierr = PetscMalloc((2*nrecvs+1)*sizeof(int),&source);CHKERRQ(ierr);
  len  = source + nrecvs;
  cnt  = nrecvs;
  ierr = PetscMalloc((ng+1)*sizeof(int),&nownedsenders);CHKERRQ(ierr);
  ierr = PetscMemzero(nownedsenders,ng*sizeof(int));CHKERRQ(ierr);
  while (cnt) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);CHKERRQ(ierr);
    source[imdex]  = recv_status.MPI_SOURCE;
    len[imdex]     = len[imdex]/2;
    /* count how many local owners for each of my global owned indices */
    for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
    cnt--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* count how many globally owned indices are on an edge multiplied by how many processors own them. */
  nowned  = 0;
  nownedm = 0;
  for (i=0; i<ng; i++) {
    if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
  }

  /* create single array to contain rank of all local owners of each globally owned index */
  ierr      = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);CHKERRQ(ierr);
  ierr      = PetscMalloc((ng+1)*sizeof(int),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }

  /* for each nontrivial globally owned node list all arriving processors */
  for (i=0; i<nrecvs; i++) {
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        ownedsenders[starts[node]++] = source[i];
      }
    }
  }

  if (debug) { /* -----------------------------------  */
    /* starts[] was advanced while filling ownedsenders; rebuild it */
    starts[0]    = 0;
    for (i=1; i<ng; i++) {
      if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
      else starts[i] = starts[i-1];
    }
    for (i=0; i<ng; i++) {
      if (nownedsenders[i] > 1) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);CHKERRQ(ierr);
        for (j=0; j<nownedsenders[i]; j++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);CHKERRQ(ierr);
        }
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  }/* -----------------------------------  */

  /* wait on original sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(sends);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* pack messages to send back to local owners */
  starts[0]    = 0;
  for (i=1; i<ng; i++) {
    if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
    else starts[i] = starts[i-1];
  }
  nsends2 = nrecvs;
  ierr    = PetscMalloc((nsends2+1)*sizeof(int),&nprocs);CHKERRQ(ierr); /* length of each message */
  for (i=0; i<nrecvs; i++) {
    nprocs[i] = 1;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        nprocs[i] += 2 + nownedsenders[node];
      }
    }
  }
  nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
  ierr = PetscMalloc((nt+1)*sizeof(int),&sends2);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends2+1)*sizeof(int),&starts2);CHKERRQ(ierr);
  starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
  /*
     Each message is 1 + nprocs[i] long, and consists of
       (0) the number of nodes being sent back
       (1) the local node number,
       (2) the number of processors sharing it,
       (3) the processors sharing it
     with fields (1)-(3) repeated per nontrivial node.
  */
  for (i=0; i<nsends2; i++) {
    cnt = 1;
    sends2[starts2[i]] = 0;
    for (j=0; j<len[i]; j++) {
      node = recvs[2*i*nmax+2*j]-rstart;
      if (nownedsenders[node] > 1) {
        sends2[starts2[i]]++;
        sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
        sends2[starts2[i]+cnt++] = nownedsenders[node];
        ierr = PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));CHKERRQ(ierr);
        cnt += nownedsenders[node];
      }
    }
  }

  /* send the message lengths */
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);CHKERRQ(ierr);
  }

  /* receive the message lengths; replies come from exactly the ranks we
     originally sent to (dest[]) */
  nrecvs2 = nsends;
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);CHKERRQ(ierr);
  nt      = 0;
  for (i=0; i<nrecvs2; i++) {
    ierr =  MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);CHKERRQ(ierr);
    nt   += lens2[i];
  }
  starts3[0] = 0;
  for (i=0; i<nrecvs2-1; i++) {
    starts3[i+1] = starts3[i] + lens2[i];
  }
  ierr = PetscMalloc((nt+1)*sizeof(int),&recvs2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs2; i++) {
    ierr = MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* send the messages */
  ierr = PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  for (i=0; i<nsends2; i++) {
    ierr = MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);CHKERRQ(ierr);
  }

  /* wait on receives */
  ierr = PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrecvs2,recv_waits,recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_statuses);CHKERRQ(ierr);
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  if (debug) { /* -----------------------------------  */
    cnt = 0;
    for (i=0; i<nrecvs2; i++) {
      nt = recvs2[cnt++];
      for (j=0; j<nt; j++) {
        ierr = PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);CHKERRQ(ierr);
        for (k=0; k<recvs2[cnt+1]; k++) {
          ierr = PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);CHKERRQ(ierr);
        }
        cnt += 2 + recvs2[cnt+1];
        ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
      }
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* count number subdomains for each local node; nprocs[] is reused here
     as a per-rank counter over the reply data */
  ierr = PetscMalloc(size*sizeof(int),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,size*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        nprocs[recvs2[cnt+2+k]]++;
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
  *nproc    = nt;
  ierr = PetscMalloc((nt+1)*sizeof(int),procs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int),numprocs);CHKERRQ(ierr);
  ierr = PetscMalloc((nt+1)*sizeof(int*),indices);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&bprocs);CHKERRQ(ierr);
  /* bprocs[] maps a rank to its compressed position in the output arrays */
  cnt       = 0;
  for (i=0; i<size; i++) {
    if (nprocs[i] > 0) {
      bprocs[i]        = cnt;
      (*procs)[cnt]    = i;
      (*numprocs)[cnt] = nprocs[i];
      ierr             = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);CHKERRQ(ierr);
      cnt++;
    }
  }

  /* make the list of subdomains for each nontrivial local node */
  ierr = PetscMemzero(*numprocs,nt*sizeof(int));CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<nrecvs2; i++) {
    nt = recvs2[cnt++];
    for (j=0; j<nt; j++) {
      for (k=0; k<recvs2[cnt+1]; k++) {
        (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
      }
      cnt += 2 + recvs2[cnt+1];
    }
  }
  ierr = PetscFree(bprocs);CHKERRQ(ierr);
  ierr = PetscFree(recvs2);CHKERRQ(ierr);

  /* sort the node indexing by their global numbers */
  nt = *nproc;
  for (i=0; i<nt; i++) {
    ierr = PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);CHKERRQ(ierr);
    for (j=0; j<(*numprocs)[i]; j++) {
      tmp[j] = lindices[(*indices)[i][j]];
    }
    ierr = PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);CHKERRQ(ierr);
    ierr = PetscFree(tmp);CHKERRQ(ierr);
  }

  if (debug) { /* -----------------------------------  */
    nt = *nproc;
    for (i=0; i<nt; i++) {
      ierr = PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);CHKERRQ(ierr);
      for (j=0; j<(*numprocs)[i]; j++) {
        ierr = PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);CHKERRQ(ierr);
      }
      ierr = PetscSynchronizedPrintf(comm,"\n");CHKERRQ(ierr);
    }
    ierr = PetscSynchronizedFlush(comm);CHKERRQ(ierr);
  } /* -----------------------------------  */

  /* wait on sends */
  if (nsends2) {
    ierr = PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends2,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }

  ierr = PetscFree(starts3);CHKERRQ(ierr);
  ierr = PetscFree(dest);CHKERRQ(ierr);
  ierr = PetscFree(send_waits);CHKERRQ(ierr);

  ierr = PetscFree(nownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(ownedsenders);CHKERRQ(ierr);
  ierr = PetscFree(starts);CHKERRQ(ierr);
  ierr = PetscFree(starts2);CHKERRQ(ierr);
  ierr = PetscFree(lens2);CHKERRQ(ierr);

  ierr = PetscFree(source);CHKERRQ(ierr);
  ierr = PetscFree(recvs);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);
  ierr = PetscFree(sends2);CHKERRQ(ierr);

  /* put the information about myself as the first entry in the list */
  first_procs    = (*procs)[0];
  first_numprocs = (*numprocs)[0];
  first_indices  = (*indices)[0];
  for (i=0; i<*nproc; i++) {
    if ((*procs)[i] == rank) {
      (*procs)[0]    = (*procs)[i];
      (*numprocs)[0] = (*numprocs)[i];
      (*indices)[0]  = (*indices)[i];
      (*procs)[i]    = first_procs;
      (*numprocs)[i] = first_numprocs;
      (*indices)[i]  = first_indices;
      break;
    }
  }

  PetscFunctionReturn(0);
}
851 
852 #undef __FUNCT__
853 #define __FUNCT__ "ISLocalToGlobalMappingRestoreInfo"
854 /*@C
855     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()
856 
857     Collective on ISLocalToGlobalMapping
858 
859     Input Parameters:
860 .   mapping - the mapping from local to global indexing
861 
862     Output Parameter:
863 +   nproc - number of processors that are connected to this one
864 .   proc - neighboring processors
865 .   numproc - number of indices for each processor
866 -   indices - indices of local nodes shared with neighbor (sorted by global numbering)
867 
868     Level: advanced
869 
870 .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
871           ISLocalToGlobalMappingGetInfo()
872 @*/
873 int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
874 {
875   int ierr,i;
876 
877   PetscFunctionBegin;
878   if (*procs) {ierr = PetscFree(*procs);CHKERRQ(ierr);}
879   if (*numprocs) {ierr = PetscFree(*numprocs);CHKERRQ(ierr);}
880   if (*indices) {
881     if ((*indices)[0]) {ierr = PetscFree((*indices)[0]);CHKERRQ(ierr);}
882     for (i=1; i<*nproc; i++) {
883       if ((*indices)[i]) {ierr = PetscFree((*indices)[i]);CHKERRQ(ierr);}
884     }
885     ierr = PetscFree(*indices);CHKERRQ(ierr);
886   }
887   PetscFunctionReturn(0);
888 }
889 
890 
891 
892 
893 
894 
895 
896 
897 
898 
899 
900 
901 
902 
903 
904 
905 
906 
907